OpenASIP 2.2
TCEISelLowering.cc
/*
    Copyright (c) 2002-2013 Tampere University.

    This file is part of TTA-Based Codesign Environment (TCE).

    Permission is hereby granted, free of charge, to any person obtaining a
    copy of this software and associated documentation files (the "Software"),
    to deal in the Software without restriction, including without limitation
    the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.
 */
/**
 * @file TCEISelLowering.cc
 *
 * Implementation of TCETargetLowering class.
 *
 * @author Veli-Pekka Jääskeläinen 2007 (vjaaskel-no.spam-cs.tut.fi)
 * @author Mikael Lepistö 2009 (mikael.lepisto-no.spam-tut.fi)
 * @author Pekka Jääskeläinen 2010
 * @author Heikki Kultala 2011-2012 (heikki.kultala-no.spam-tut.fi)
 */

#include <assert.h>
#include <string>
#include "tce_config.h"
#include <llvm/IR/Function.h>
#include <llvm/IR/DerivedTypes.h>
#include <llvm/IR/Intrinsics.h>
#include <llvm/IR/CallingConv.h>
#include <llvm/CodeGen/TargetLowering.h>
#include <llvm/CodeGen/CallingConvLower.h>
#include <llvm/CodeGen/SelectionDAG.h>
#include <llvm/CodeGen/MachineFrameInfo.h>
#include <llvm/CodeGen/MachineRegisterInfo.h>
#include <llvm/CodeGen/MachineInstrBuilder.h>
#include <llvm/Support/raw_ostream.h>

#include <llvm/Target/TargetLoweringObjectFile.h>

//#include <llvm/Config/config.h>

#include "TCEPlugin.hh"
#include "TCERegisterInfo.hh"
#include "TCETargetMachine.hh"
#include "TCESubtarget.hh"
#include "TCEISelLowering.hh"
#include "LLVMTCECmdLineOptions.hh"
#include "Application.hh"
#include "Machine.hh"
#include "AddressSpace.hh"
#include "MachineInfo.hh"

#include "llvm/Support/ErrorHandling.h"

#include <iostream> // DEBUG


#ifdef TARGET64BIT
#define DEFAULT_TYPE MVT::i64
#define DEFAULT_IMM_INSTR TCE::MOVI64sa
#define DEFAULT_SIZE 8
#define DEFAULT_REG_CLASS TCE::R64IRegsRegClass
#else
#define DEFAULT_TYPE MVT::i32
#define DEFAULT_IMM_INSTR TCE::MOVI32ri
#define DEFAULT_SIZE 4
#define DEFAULT_REG_CLASS TCE::R32IRegsRegClass
#endif


using namespace llvm;

//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "TCEGenCallingConv.inc"

#include "ArgRegs.hh"
SDValue
TCETargetLowering::LowerReturn(
    SDValue Chain,
    CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    SDLOC_PARAM_TYPE dl, SelectionDAG &DAG) const
{

    // CCValAssign - represents the assignment of the return value
    // to locations.
    SmallVector<CCValAssign, 16> RVLocs;

    // CCState - Info about the registers and stack slots.
    CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                   RVLocs, *DAG.getContext());

    // Analyze return values.
    CCInfo.AnalyzeReturn(Outs, RetCC_TCE);

    SmallVector<SDValue, 4> RetOps(1, Chain);

    SDValue Flag;

    // Copy the result values into the output registers.
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
        CCValAssign &VA = RVLocs[i];
        assert(VA.isRegLoc() && "Can only return in registers!");

        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                                 OutVals[i], Flag);

        // Guarantee that all emitted copies are stuck together with flags.
        Flag = Chain.getValue(1);
        RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
    }

    RetOps[0] = Chain;  // Update chain.

    // Add the flag if we have it.
    if (Flag.getNode())
        RetOps.push_back(Flag);

    return DAG.getNode(
        TCEISD::RET_FLAG, dl, MVT::Other, ArrayRef<SDValue>(RetOps));
}

/**
 * Lowers formal arguments.
 */
SDValue
TCETargetLowering::LowerFormalArguments(
    SDValue Chain,
    CallingConv::ID CallConv,
    bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins,
    SDLOC_PARAM_TYPE dl,
    SelectionDAG &DAG,
    SmallVectorImpl<SDValue> &InVals) const
{

    MachineFunction &MF = DAG.getMachineFunction();
    auto& frameInfo = MF.getFrameInfo();
    MachineRegisterInfo &RegInfo = MF.getRegInfo();

    // Assign locations to all of the incoming arguments.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(
        CallConv, isVarArg, DAG.getMachineFunction(),
        ArgLocs, *DAG.getContext());

    CCInfo.AnalyzeFormalArguments(Ins, CC_TCE);

    const unsigned *CurArgReg = ArgRegs, *ArgRegEnd = ArgRegs + argRegCount;
    const unsigned maxMemAlignment = isVarArg ? 4 : tm_.stackAlignment();
    unsigned ArgOffset = 0;

    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        SDValue ArgValue;
        CCValAssign &VA = ArgLocs[i];
        // FIXME: We ignore the register assignments of AnalyzeFormalArguments
        // because it doesn't know how to split a double into two i32
        // registers.
        EVT ObjectVT = VA.getValVT();
        MVT sType = ObjectVT.getSimpleVT().SimpleTy;

        if (sType == MVT::i1 || sType == MVT::i8 || sType == MVT::i16 ||
#ifdef TARGET64BIT
            sType == MVT::i64 ||
#endif
            sType == MVT::i32) {
            // Note: arguments may be erroneously marked as unused when
            // varargs are used (possible upstream bug).
            if (!Ins[i].Used) {
                if (CurArgReg < ArgRegEnd) {
                    ++CurArgReg;
                }

                InVals.push_back(DAG.getUNDEF(ObjectVT));
            } else if (CurArgReg < ArgRegEnd && !isVarArg) {
                unsigned VReg = RegInfo.createVirtualRegister(
                    &DEFAULT_REG_CLASS);
                MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
                SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, DEFAULT_TYPE);
                if (ObjectVT != DEFAULT_TYPE) {
                    unsigned AssertOp = ISD::AssertSext;
                    Arg = DAG.getNode(
                        AssertOp, dl, DEFAULT_TYPE, Arg,
                        DAG.getValueType(ObjectVT));
                    Arg = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Arg);
                }
                InVals.push_back(Arg);

            } else {
                int FrameIdx = frameInfo.CreateFixedObject(
                    DEFAULT_SIZE, ArgOffset, /*immutable=*/true);

                SDValue FIPtr = DAG.getFrameIndex(FrameIdx, DEFAULT_TYPE);
                SDValue Load;
                if (ObjectVT == DEFAULT_TYPE) {
                    Load = DAG.getLoad(
                        DEFAULT_TYPE, dl, Chain, FIPtr, MachinePointerInfo());
                } else {
                    ISD::LoadExtType LoadOp = ISD::SEXTLOAD;

#ifndef LITTLE_ENDIAN_TARGET // big endian extload hack starts

                    // TODO: WHAT IS THIS??
                    // TCE IS NO LONGER ALWAYS BIG-ENDIAN!
                    // TCE is big endian, add an offset based on the ObjectVT.
                    unsigned Offset = DEFAULT_SIZE - std::max(
#ifdef LLVM_OLDER_THAN_16
                        1UL, ObjectVT.getSizeInBits().getFixedSize()/8);
#else
                        1UL, ObjectVT.getSizeInBits().getFixedValue()/8);
#endif
                    FIPtr = DAG.getNode(
                        ISD::ADD, dl, DEFAULT_TYPE, FIPtr,
                        DAG.getConstant(Offset, dl, DEFAULT_TYPE));

#endif // big endian hack ends

                    Load = DAG.getExtLoad(
                        LoadOp, dl, DEFAULT_TYPE, Chain, FIPtr,
                        MachinePointerInfo(), ObjectVT);
                    Load = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Load);
                }
                InVals.push_back(Load);
            }
        } else if (sType == MVT::f16) {
            if (!Ins[i].Used) { // Argument is dead.
                if (CurArgReg < ArgRegEnd) {
                    ++CurArgReg;
                }
                InVals.push_back(DAG.getUNDEF(ObjectVT));
            } else if (CurArgReg < ArgRegEnd && !isVarArg) {
                unsigned VReg = RegInfo.createVirtualRegister(
                    &TCE::HFPRegsRegClass);
                MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
                SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f16);
                InVals.push_back(Arg);
            } else {
                int FrameIdx = frameInfo.CreateFixedObject(
                    DEFAULT_SIZE, ArgOffset, /*immutable=*/true);
                SDValue FIPtr = DAG.getFrameIndex(FrameIdx, DEFAULT_TYPE);
                SDValue Load = DAG.getLoad(
                    MVT::f16, dl, Chain, FIPtr, MachinePointerInfo());
                InVals.push_back(Load);
            }
        } else if (sType == MVT::f32 || sType == MVT::f64) {
            if (!Ins[i].Used) { // Argument is dead.
                if (CurArgReg < ArgRegEnd) {
                    ++CurArgReg;
                }
                InVals.push_back(DAG.getUNDEF(ObjectVT));
            } else if (CurArgReg < ArgRegEnd && !isVarArg) { // reg argument
                auto regClass = sType == MVT::f32 ?
                    &TCE::FPRegsRegClass :
                    &TCE::R64DFPRegsRegClass;
                unsigned VReg = RegInfo.createVirtualRegister(regClass);
                MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
                SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, sType);
                InVals.push_back(Arg);
            } else { // argument in stack.
                int FrameIdx = frameInfo.CreateFixedObject(
                    DEFAULT_SIZE, ArgOffset, /*immutable=*/true);
                SDValue FIPtr = DAG.getFrameIndex(FrameIdx, DEFAULT_TYPE);
                SDValue Load = DAG.getLoad(
                    sType, dl, Chain, FIPtr, MachinePointerInfo());
                InVals.push_back(Load);
            }
        } else if (sType.isVector()) {
            if (!Ins[i].Used) {
                InVals.push_back(DAG.getUNDEF(ObjectVT));
            } else {
                int FrameIdx = MF.getFrameInfo().CreateFixedObject(
                    sType.getStoreSize(), ArgOffset, true);
                SDValue FIPtr = DAG.getFrameIndex(FrameIdx, DEFAULT_TYPE);
                SDValue Load = DAG.getLoad(
                    sType, dl, Chain, FIPtr, MachinePointerInfo());
                InVals.push_back(Load);
            }
        } else {
            std::cerr << "Unhandled argument type: "
                      << ObjectVT.getEVTString() << std::endl;
            std::cerr << "sType size in bits: " << sType.getSizeInBits()
                      << std::endl;
            std::cerr << "is a vector? " << sType.isVector() << std::endl;
            assert(false);
        }

        unsigned argumentByteSize = sType.getStoreSize();

        // Align the parameter to the stack correctly.
        if (argumentByteSize <= maxMemAlignment) {
            ArgOffset += maxMemAlignment;
        } else {
            unsigned alignBytes = maxMemAlignment - 1;
            ArgOffset += (argumentByteSize + alignBytes) & (~alignBytes);
        }
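
        // Worked example of the align-up logic above (illustrative, not part
        // of the original source): with maxMemAlignment == 4, a 2-byte
        // argument still consumes a full 4-byte slot, while a 6-byte argument
        // advances the offset by (6 + 3) & ~3 == 8 bytes, i.e. its size is
        // rounded up to the next multiple of the alignment.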
    }

    // inspired from ARM
    if (isVarArg) {
        /// @todo This probably doesn't work with vector arguments currently.
        // This will point to the next argument passed via stack.

        VarArgsFrameOffset = frameInfo.CreateFixedObject(
            DEFAULT_SIZE, ArgOffset, /*immutable=*/true);
    }

    return Chain;
}


SDValue
TCETargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {

    SelectionDAG &DAG = CLI.DAG;
    SDLoc &dl = CLI.DL;
    SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
    SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
    SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
    SDValue Chain = CLI.Chain;
    SDValue Callee = CLI.Callee;
    bool &isTailCall = CLI.IsTailCall;
    CallingConv::ID CallConv = CLI.CallConv;
    bool isVarArg = CLI.IsVarArg;

    // We do not yet support tail call optimization.
    isTailCall = false;

    (void)CC_TCE;

    const unsigned maxMemAlignment = isVarArg ? 4 : tm_.stackAlignment();
    int regParams = 0;
    unsigned ArgsSize = 0;

    // Count the size of the outgoing arguments.
    for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
        EVT ObjectVT = Outs[i].VT;
        MVT sType = Outs[i].VT.SimpleTy;
#ifndef TARGET64BIT
        if (sType == MVT::i1 || sType == MVT::i8 || sType == MVT::i16 ||
            sType == MVT::i32 || sType == MVT::f16 || sType == MVT::f32) {
            if (regParams < argRegCount) {
                regParams++;
            }
        } else if (sType == MVT::i64 || sType == MVT::f64) {
            // Nothing to do.
        } else if (sType.isVector()) {
            // Nothing to do.
        } else {
            std::cerr << "Unknown argument type: "
                      << ObjectVT.getEVTString() << std::endl;
            assert(false);
        }
#else
        if (sType == MVT::i1 || sType == MVT::i8 || sType == MVT::i16 ||
            sType == MVT::i32 || sType == MVT::i64 || sType == MVT::f16 ||
            sType == MVT::f32 || sType == MVT::f64) {
            if (regParams < argRegCount) {
                regParams++;
            }
        } else if (sType.isVector()) {
            // Nothing to do.
        } else {
            std::cerr << "Unknown argument type: "
                      << ObjectVT.getEVTString() << std::endl;
            assert(false);
        }
#endif

        unsigned argumentByteSize = sType.getStoreSize();

        // Align the parameter to the stack correctly.
        if (argumentByteSize <= maxMemAlignment) {
            ArgsSize += maxMemAlignment;
        } else {
            unsigned alignBytes = maxMemAlignment - 1;
            ArgsSize += (argumentByteSize + alignBytes) & (~alignBytes);
        }
    }
    Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
    SmallVector<SDValue, 8> MemOpChains;

    SmallVector<std::pair<unsigned, SDValue>, argRegCount> RegsToPass;

    unsigned ArgOffset = 0;

    for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
        SDValue Val = OutVals[i];
        EVT ObjectVT = Val.getValueType();
        MVT sType = ObjectVT.getSimpleVT().SimpleTy;
        SDValue ValToStore(0, 0);

#ifndef TARGET64BIT
        if (sType == MVT::i1 || sType == MVT::i8 || sType == MVT::i16 ||
            sType == MVT::i32 || sType == MVT::f32 || sType == MVT::f16) {
            if (RegsToPass.size() >= argRegCount || isVarArg) {
                ValToStore = Val;
            }
            if (RegsToPass.size() < argRegCount) {
                RegsToPass.push_back(
                    std::make_pair(ArgRegs[RegsToPass.size()], Val));
            }
        } else if (sType.isVector()) {
            ValToStore = Val;
        } else {
            std::cerr << "Unknown argument type: "
                      << ObjectVT.getEVTString() << std::endl;
            assert(false);
        }
#else // is 64-bit
        if (sType == MVT::i1 || sType == MVT::i8 || sType == MVT::i16 ||
            sType == MVT::i32 || sType == MVT::i64 || sType == MVT::f32 ||
            sType == MVT::f64) {
            if (RegsToPass.size() >= argRegCount || isVarArg) {
                ValToStore = Val;
            }
            if (RegsToPass.size() < argRegCount) {
                RegsToPass.push_back(
                    std::make_pair(ArgRegs[RegsToPass.size()], Val));
            }
        } else if (sType.isVector()) {
            ValToStore = Val;
        } else {
            std::cerr << "Unknown argument type: "
                      << ObjectVT.getEVTString() << std::endl;
            assert(false);
        }
#endif

        if (ValToStore.getNode()) {
            SDValue StackPtr = DAG.getCopyFromReg(
                Chain, dl, TCE::SP, getPointerTy(
                    getTargetMachine().createDataLayout(), 0));
            SDValue PtrOff = DAG.getConstant(ArgOffset, dl, DEFAULT_TYPE);
            PtrOff = DAG.getNode(ISD::ADD, dl, DEFAULT_TYPE, StackPtr, PtrOff);

            MemOpChains.push_back(DAG.getStore(Chain, dl, ValToStore,
                                               PtrOff, MachinePointerInfo()));
        }

        unsigned argumentByteSize = sType.getStoreSize();

        // Align the parameter to the stack correctly.
        if (argumentByteSize <= maxMemAlignment) {
            ArgOffset += maxMemAlignment;
        } else {
            unsigned alignBytes = maxMemAlignment - 1;
            ArgOffset += (argumentByteSize + alignBytes) & (~alignBytes);
        }
    }

    // Emit all stores, and make sure they occur before any copies into
    // physregs.
    if (!MemOpChains.empty()) {
        Chain = DAG.getNode(
            ISD::TokenFactor, dl, MVT::Other, ArrayRef<SDValue>(MemOpChains));
    }

    // Build a sequence of copy-to-reg nodes chained together with token
    // chain and flag operands which copy the outgoing args into registers.
    // The InFlag is necessary since all emitted instructions must be
    // stuck together.
    SDValue InFlag;

    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
        unsigned Reg = RegsToPass[i].first;
        Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
        InFlag = Chain.getValue(1);
    }

    // If the callee is a GlobalAddress node (quite common, every direct call
    // is), turn it into a TargetGlobalAddress node so that legalize doesn't
    // hack it. Likewise ExternalSymbol -> TargetExternalSymbol.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
        Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, DEFAULT_TYPE);
    else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
        Callee = DAG.getTargetExternalSymbol(E->getSymbol(), DEFAULT_TYPE);
    std::vector<EVT> NodeTys;
    NodeTys.push_back(MVT::Other);  // Returns a chain.
    NodeTys.push_back(MVT::Glue);   // Returns a flag for retval copy to use.
    SDValue Ops[] = { Chain, Callee, InFlag };

    Chain = DAG.getNode(
        TCEISD::CALL, dl, ArrayRef<EVT>(NodeTys),
        ArrayRef<SDValue>(Ops, InFlag.getNode() ? 3 : 2));

    InFlag = Chain.getValue(1);

    Chain = DAG.getCALLSEQ_END(
        Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
        DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
    InFlag = Chain.getValue(1);

    // Assign locations to each value returned by this call.
    SmallVector<CCValAssign, 16> RVLocs;
    CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                   RVLocs, *DAG.getContext());

    RVInfo.AnalyzeCallResult(Ins, RetCC_TCE);

    // Copy all of the result registers out of their specified physregs
    // (there is only one return-value register).
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
        unsigned Reg = RVLocs[i].getLocReg();

        Chain = DAG.getCopyFromReg(Chain, dl, Reg,
                                   RVLocs[i].getValVT(), InFlag).getValue(1);
        InFlag = Chain.getValue(2);
        InVals.push_back(Chain.getValue(0));
    }

    return Chain;
}

/**
 * The Constructor.
 *
 * Initializes the target lowering.
 */
TCETargetLowering::TCETargetLowering(
    TargetMachine &TM, const TCESubtarget &subt)
    : TargetLowering(TM), tm_(static_cast<TCETargetMachine &>(TM)) {
    LLVMTCECmdLineOptions* opts = dynamic_cast<LLVMTCECmdLineOptions*>(
        Application::cmdLineOptions());

    if (opts != NULL && opts->conservativePreRAScheduler()) {
        setSchedulingPreference(llvm::Sched::RegPressure);
    }

    hasI1RC_ = MachineInfo::supportsBoolRegisterGuardedJumps(tm_.ttaMachine());
    if (hasI1RC_)
        addRegisterClass(MVT::i1, &TCE::R1RegsRegClass);

#ifdef TARGET64BIT
    addRegisterClass(MVT::i64, &TCE::R64IRegsRegClass);
    addRegisterClass(MVT::f64, &TCE::R64DFPRegsRegClass);
#else
    addRegisterClass(MVT::i32, &TCE::R32IRegsRegClass);
#endif
    addRegisterClass(MVT::f32, &TCE::FPRegsRegClass);
    addRegisterClass(MVT::f16, &TCE::HFPRegsRegClass);

    setOperationAction(ISD::UINT_TO_FP, MVT::i1,  Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i8,  Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    setOperationAction(ISD::SINT_TO_FP, MVT::i1,  Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i8,  Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);

    setOperationAction(ISD::FP_TO_UINT, MVT::i1,  Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i8,  Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

    setOperationAction(ISD::FP_TO_SINT, MVT::i1,  Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i8,  Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);

    setOperationAction(ISD::FABS, MVT::f32, Custom);
    setOperationAction(ISD::FABS, MVT::f64, Custom);

    setOperationAction(ISD::GlobalAddress, DEFAULT_TYPE, Custom);
    setOperationAction(ISD::BlockAddress,  DEFAULT_TYPE, Custom);
    setOperationAction(ISD::ConstantPool,  DEFAULT_TYPE, Custom);

    setOperationAction(ISD::TRAP, MVT::Other, Custom);

    // TODO: define a TCE instruction for leading/trailing zero count.
    setOperationAction(ISD::CTLZ,  DEFAULT_TYPE, Expand);
    setOperationAction(ISD::CTTZ,  DEFAULT_TYPE, Expand);
    setOperationAction(ISD::CTPOP, DEFAULT_TYPE, Expand);
    // Using the 'old way' of MVT::Other to cover all value types is
    // illegal now.
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f80, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::i1,  Expand);
    setOperationAction(ISD::SELECT_CC, MVT::i8,  Expand);
    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);


    // Not needed when we use xor for boolean comparison.
//    setOperationAction(ISD::SETCC, MVT::i1, Promote);

    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    // Expand indirect branches.
    setOperationAction(ISD::BRIND, MVT::Other, Expand);
    // Expand jumptable branches.
    setOperationAction(ISD::BR_JT, MVT::Other, Expand);
    // Expand conditional branches.

    // only port-guarded jumps..
    if (MachineInfo::supportsPortGuardedJumps(tm_.ttaMachine()) &&
        !MachineInfo::supportsBoolRegisterGuardedJumps(tm_.ttaMachine())) {
        std::cerr << "Only port guarded jumps supported, not expanding br_cc"
                  << std::endl;

        setOperationAction(ISD::BRCOND, MVT::Other, Expand);
        setOperationAction(ISD::BRCOND, MVT::i1,    Expand);
        setOperationAction(ISD::BRCOND, MVT::i32,   Expand);
        setOperationAction(ISD::BRCOND, MVT::f16,   Expand);
        setOperationAction(ISD::BRCOND, MVT::f32,   Expand);
        setOperationAction(ISD::BRCOND, MVT::i64,   Expand);
    } else {
        setOperationAction(ISD::BR_CC, MVT::Other, Expand);
        setOperationAction(ISD::BR_CC, MVT::i1,    Expand);
        setOperationAction(ISD::BR_CC, MVT::i32,   Expand);
        setOperationAction(ISD::BR_CC, MVT::f16,   Expand);
        setOperationAction(ISD::BR_CC, MVT::f32,   Expand);
        setOperationAction(ISD::BR_CC, MVT::i64,   Expand);
    }

    // Hardware loop ops
    setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
    if (opts != NULL && !opts->disableHWLoops()) {
        setTargetDAGCombine(ISD::BRCOND);
    }

#ifdef TARGET64BIT
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
#endif
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

#ifdef TARGET64BIT
    setOperationAction(ISD::MULHU, MVT::i64, Expand);
    setOperationAction(ISD::MULHS, MVT::i64, Expand);
#endif
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

    setOperationAction(ISD::VASTART, MVT::Other, Custom);

    setOperationAction(ISD::VAARG,  MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
    setOperationAction(ISD::VAEND,  MVT::Other, Expand);
    setOperationAction(ISD::STACKSAVE,    MVT::Other, Expand);
    setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

    setOperationAction(ISD::DYNAMIC_STACKALLOC, DEFAULT_TYPE, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);

    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);

    setOperationAction(ISD::BSWAP, DEFAULT_TYPE, Expand);

    setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

#ifdef TARGET64BIT
    setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
#endif

    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
    // LLVM 3.7 and newer take the types as: target type as the second
    // parameter, memory type as the third parameter.
    setLoadExtAction(ISD::EXTLOAD, MVT::f32,    MVT::f16,    Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::v2f32,  MVT::v2f16,  Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f32,  MVT::v4f16,  Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::v8f32,  MVT::v8f16,  Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::v32f32, MVT::v32f16, Expand);

#ifdef TARGET64BIT
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
#endif

#if LLVM_HAS_CUSTOM_VECTOR_EXTENSION == 2
    setLoadExtAction(ISD::EXTLOAD, MVT::v64f32,  MVT::v64f16,  Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::v128f32, MVT::v128f16, Expand);
#endif

    if (!tm_.has8bitLoads()) {
        if (Application::verboseLevel() > 0) {
            std::cout << "No 8-bit loads in the processor. "
                      << "Emulating 8-bit loads with wider loads. "
                      << "This may be very slow if the program performs "
                      << "lots of 8-bit loads." << std::endl;
        }

#ifdef TARGET64BIT
        setLoadExtAction(ISD::EXTLOAD,  MVT::i64, MVT::i8, Custom);
        setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i8, Custom);
        setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i8, Custom);
        setOperationAction(ISD::LOAD, MVT::i8, Custom);
        setOperationAction(ISD::LOAD, MVT::i1, Custom);

        setLoadExtAction(ISD::EXTLOAD,  MVT::i64, MVT::i1, Custom);
        setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i1, Custom);
        setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i1, Custom);
#else
        setLoadExtAction(ISD::EXTLOAD,  MVT::i32, MVT::i8, Custom);
        setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i8, Custom);
        setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i8, Custom);
        setOperationAction(ISD::LOAD, MVT::i8, Custom);
        setOperationAction(ISD::LOAD, MVT::i1, Custom);

        setLoadExtAction(ISD::EXTLOAD,  MVT::i32, MVT::i1, Custom);
        setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i1, Custom);
        setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i1, Custom);
#endif
    }

    if (!tm_.has16bitLoads()) {
        if (Application::verboseLevel() > 0) {
            std::cout << "No 16-bit loads in the processor. "
                      << "Emulating 16-bit loads with wider loads. "
                      << "This may be very slow if the program performs "
                      << "lots of 16-bit loads." << std::endl;
        }
#ifdef TARGET64BIT
        setLoadExtAction(ISD::EXTLOAD,  MVT::i64, MVT::i16, Custom);
        setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i16, Custom);
        setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i16, Custom);
        setOperationAction(ISD::LOAD, MVT::i16, Custom);
#else
        setLoadExtAction(ISD::EXTLOAD,  MVT::i32, MVT::i16, Custom);
        setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i16, Custom);
        setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i16, Custom);
        setOperationAction(ISD::LOAD, MVT::i16, Custom);
#endif
    }

    setOperationAction(ISD::ADDE, MVT::i32, Expand);
    setOperationAction(ISD::ADDC, MVT::i32, Expand);
    setOperationAction(ISD::ADDE, MVT::i16, Expand);
    setOperationAction(ISD::ADDC, MVT::i16, Expand);
    setOperationAction(ISD::ADDE, MVT::i8,  Expand);
    setOperationAction(ISD::ADDC, MVT::i8,  Expand);
#ifdef TARGET64BIT
    setOperationAction(ISD::Constant, MVT::i64, Custom);
#else
    setOperationAction(ISD::Constant, MVT::i32, Custom);
#endif

    setStackPointerRegisterToSaveRestore(TCE::SP);

    // Set missing operations that can be emulated with an emulation function
    // or an LLVM built-in emulation pattern to be expanded.
    const std::set<std::pair<unsigned, llvm::MVT::SimpleValueType> >*
        missingOps = tm_.missingOperations();

    std::set<std::pair<unsigned, llvm::MVT::SimpleValueType> >::const_iterator
        iter = missingOps->begin();

    if (Application::verboseLevel() > 0) {
        Application::logStream() << "Missing ops: ";
    }

    while (iter != missingOps->end()) {
        unsigned nodetype = (*iter).first;
        llvm::MVT::SimpleValueType valuetype = (*iter).second;
        if (Application::verboseLevel() > 0) {
            switch (nodetype) {
            case ISD::SDIV: std::cerr << "SDIV,"; break;
            case ISD::UDIV: std::cerr << "UDIV,"; break;
            case ISD::SREM: std::cerr << "SREM,"; break;
            case ISD::UREM: std::cerr << "UREM,"; break;
            case ISD::ROTL: std::cerr << "ROTL,"; break;
            case ISD::ROTR: std::cerr << "ROTR,"; break;
            case ISD::MUL:  std::cerr << "MUL,"; break;
            case ISD::SIGN_EXTEND_INREG:
                if (valuetype == MVT::i8)  std::cerr << "SXQW,";
                if (valuetype == MVT::i16) std::cerr << "SXHW,";
                break;
            default: std::cerr << nodetype << ", "; break;
            };
        }
        setOperationAction(nodetype, valuetype, Expand);
        iter++;
    }

    const std::set<std::pair<unsigned, llvm::MVT::SimpleValueType> >*
        promotedOps = tm_.promotedOperations();

    iter = promotedOps->begin();
    while (iter != promotedOps->end()) {
        unsigned nodetype = (*iter).first;
        llvm::MVT::SimpleValueType valuetype = (*iter).second;
        llvm::EVT evt(valuetype);
        setOperationAction(nodetype, valuetype, Promote);
        iter++;
    }

    if (Application::verboseLevel() > 0) {
        std::cerr << std::endl;
    }

    auto customLegalizedOps = tm_.customLegalizedOperations();
    for (auto i : *customLegalizedOps) {
        unsigned nodetype = i.first;
        llvm::MVT::SimpleValueType valuetype = i.second;
        llvm::EVT evt(valuetype);
        setOperationAction(nodetype, valuetype, Custom);
    }

    setJumpIsExpensive(true);

    //setShouldFoldAtomicFences(true);

    PredictableSelectIsExpensive = false;

    // Determine which global addresses should be loaded from the constant
    // pool, by address space id, due to limited immediate support.
    // Reset for the default address space first.
    loadGAFromConstantPool_[0] = false;
    for (const auto& as : tm_.ttaMachine().addressSpaceNavigator()) {
        if (as->numericalIds().empty()) {
            // No IDs specified, assume the default address space ID (0).
            if (as->end() > tm_.largestImmValue()) {
                if (Application::verboseLevel() > 0) {
                    std::cerr << "Global addresses by "
                              << "address space id of 0"
                              << " (implicitly specified by AS: " << as->name()
                              << ") will be stored in constant pool."
                              << std::endl;
                }
                loadGAFromConstantPool_[0] = true;
            } else {
                loadGAFromConstantPool_[0] |= false;
            }
            continue;
        }

        for (unsigned id : as->numericalIds()) {
            if (as->end() > tm_.largestImmValue()) {
                if (Application::verboseLevel() > 0) {
                    std::cerr << "Global addresses belonging to "
                              << "address space id of " << id
                              << " (specified by AS: " << as->name()
                              << ") will be stored in constant pool."
                              << std::endl;
                }
                loadGAFromConstantPool_[id] = true;
            } else {
                loadGAFromConstantPool_[id] |= false;
            }
        }
    }

    setBooleanContents(ZeroOrOneBooleanContent);
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

    computeRegisterProperties(subt.getRegisterInfo());
}

/**
 * Returns target node opcode names for debugging purposes.
 *
 * @param opcode Opcode to convert to string.
 * @return Opcode name.
 */
const char*
TCETargetLowering::getTargetNodeName(unsigned opcode) const {
    switch (opcode) {
    default: return NULL;
    case TCEISD::CALL:        return "TCEISD::CALL";
    case TCEISD::RET_FLAG:    return "TCEISD::RET_FLAG";
    case TCEISD::GLOBAL_ADDR: return "TCEISD::GLOBAL_ADDR";
    case TCEISD::CONST_POOL:  return "TCEISD::CONST_POOL";
    case TCEISD::FTOI:        return "TCEISD::FTOI";
    case TCEISD::ITOF:        return "TCEISD::ITOF";
    case TCEISD::SELECT_I1:   return "TCEISD::SELECT_I1";
    case TCEISD::SELECT_I8:   return "TCEISD::SELECT_I8";
    case TCEISD::SELECT_I16:  return "TCEISD::SELECT_I16";
    case TCEISD::SELECT_I32:  return "TCEISD::SELECT_I32";
    case TCEISD::SELECT_I64:  return "TCEISD::SELECT_I64";
    case TCEISD::SELECT_F16:  return "TCEISD::SELECT_F16";
    case TCEISD::SELECT_F32:  return "TCEISD::SELECT_F32";
    case TCEISD::SELECT_F64:  return "TCEISD::SELECT_F64";
    }
}

SDValue TCETargetLowering::LowerTRAP(SDValue Op, SelectionDAG &DAG) const {
    SDLoc dl(Op);

    TargetLowering::ArgListTy Args;

    TargetLowering::CallLoweringInfo CLI(DAG);
    CLI.setDebugLoc(dl);
    CLI.setChain(Op->getOperand(0));
    CLI.setCallee(
        CallingConv::C,
        Type::getVoidTy(*DAG.getContext()),
        DAG.getExternalSymbol(
            "_exit",
            getPointerTy(getTargetMachine().createDataLayout(), 0)),
        std::move(Args));
    CLI.setInRegister(false);
    CLI.setNoReturn(true);
    CLI.setVarArg(false);
    CLI.setTailCall(false);
    CLI.setDiscardResult(false);
    CLI.setSExtResult(false);
    CLI.setZExtResult(false);

    std::pair<SDValue, SDValue> CallResult =
        LowerCallTo(CLI);
    return CallResult.second;
}
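
// Illustrative effect of LowerTRAP above (a sketch, not authoritative): a
// C-level trap such as
//     void f() { __builtin_trap(); }
// is lowered into an ordinary noreturn call to the _exit symbol, so the
// trap relies on the runtime's _exit instead of a dedicated trap opcode.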


SDValue
TCETargetLowering::LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const {
    const GlobalAddressSDNode* gn = cast<GlobalAddressSDNode>(Op);
    const GlobalValue* gv = gn->getGlobal();
    // FIXME: there isn't really any debug info here.
    SDLoc dl(Op);

#if 0
    std::cerr << "lowering GA: AS = " << gn->getAddressSpace() << ", ";
    gv->getValueType()->dump();
#endif

    if (shouldLoadFromConstantPool(gn->getAddressSpace())) {
        // Immediate support for the address space is limited. Therefore,
        // the address must be loaded from the constant pool.
        auto vt = getPointerTy(DAG.getDataLayout(), gn->getAddressSpace());
        SDValue cpIdx = DAG.getConstantPool(
            gv, getPointerTy(DAG.getDataLayout()));
        llvm::Align Alignment = cast<ConstantPoolSDNode>(cpIdx)->getAlign();
        SDValue result = DAG.getLoad(
            vt, dl, DAG.getEntryNode(), cpIdx,
            MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

        if (Application::verboseLevel() > 0) {
            std::cerr << "Expanded Global Value to a load from "
                      << "the constant pool." << std::endl;
        }
        return result;
    }
    SDValue tga = DAG.getTargetGlobalAddress(gv, dl, DEFAULT_TYPE);
    return DAG.getNode(TCEISD::GLOBAL_ADDR, SDLoc(Op), DEFAULT_TYPE, tga);
}

SDValue
TCETargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
    const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

    SDValue BA_SD = DAG.getTargetBlockAddress(BA, DEFAULT_TYPE);
    SDLoc dl(Op);
    return DAG.getNode(
        TCEISD::BLOCK_ADDR, dl,
        getPointerTy(getTargetMachine().createDataLayout(), 0), BA_SD);
}

static SDValue LowerCONSTANTPOOL(SDValue Op, SelectionDAG &DAG) {
    // TODO: Check this.
    llvm::MVT ptrVT = Op.getValueType().getSimpleVT();
    ConstantPoolSDNode* cp = cast<ConstantPoolSDNode>(Op);
    SDValue res;
    if (cp->isMachineConstantPoolEntry()) {
        res = DAG.getTargetConstantPool(
            cp->getMachineCPVal(), ptrVT,
            cp->getAlign());
    } else {
        res = DAG.getTargetConstantPool(
            cp->getConstVal(), ptrVT,
            cp->getAlign());
    }
    return DAG.getNode(TCEISD::CONST_POOL, SDLoc(Op), DEFAULT_TYPE, res);
}

SDValue
TCETargetLowering::LowerConstant(SDValue Op, SelectionDAG &DAG) const {
    ConstantSDNode* cn = cast<ConstantSDNode>(Op.getNode());
    assert(cn);

    if (canEncodeImmediate(*cn)) {
        return Op;
    } else {
        // The constant is not a supported immediate: return an empty
        // SDValue, so it gets converted to a load from a constant pool.
        if (Application::verboseLevel() > 0) {
            std::cerr << "Expand constant of " << cn->getSExtValue();
            std::cerr << " to a load from the constant pool." << std::endl;
        }
        // Since LLVM 3.8, LLVM's DAG Legalization does the expansion from
        // a constant to a constant pool load.
        return SDValue(nullptr, 0);
    }
}

SDValue
TCETargetLowering::LowerBuildBooleanVectorVector(
    SDValue Op, MVT newElementVT, int elemCount, SelectionDAG &DAG) const {

    BuildVectorSDNode* node = cast<BuildVectorSDNode>(Op);
    MVT mvt = Op.getSimpleValueType();
    int laneWidth = newElementVT.getSizeInBits();

    std::vector<SDValue> packedConstants(elemCount/laneWidth);
    for (int i = 0; i < elemCount; i += laneWidth) {
        unsigned int packedVal = 0;
        for (int j = 0; j < laneWidth; j++) {
            const SDValue& operand = node->getOperand(i+j);
            SDNode* opdNode = operand.getNode();
            if (isa<ConstantSDNode>(opdNode)) {
                ConstantSDNode* cn = cast<ConstantSDNode>(opdNode);
                if (cn->isOne()) {
                    packedVal += (1 << j);
                }
            }
        }
        packedConstants[i/laneWidth] =
            DAG.getConstant(packedVal, Op, newElementVT);
    }
    EVT wvt = EVT::getVectorVT(
        *DAG.getContext(), newElementVT, elemCount/laneWidth);
    SDValue intVectorBuild =
        DAG.getNode(ISD::BUILD_VECTOR, Op, wvt, packedConstants);
    SDValue retValue = DAG.getNode(ISD::BITCAST, Op, mvt, intVectorBuild);
    return retValue;
}
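
// Worked example for LowerBuildBooleanVectorVector (illustrative, not part
// of the original source): a constant v64i1 build_vector lowered with
// newElementVT == MVT::i32 becomes a v2i32 build_vector, where boolean lane
// j of each 32-lane group sets bit j of the packed word; e.g. lanes
// <1,0,1,1,0,...,0> yield the i32 constant 0b1101 == 13 for the first word.
// The v2i32 node is then bitcast back to v64i1.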

SDValue
TCETargetLowering::LowerBuildVector(SDValue Op, SelectionDAG &DAG) const {

    MVT elemVT = Op.getSimpleValueType().getScalarType();
    BuildVectorSDNode* node = cast<BuildVectorSDNode>(Op);
    int elemCount = node->getNumOperands();

    if (isConstantOrUndefBuild(*node)) {
        if (!isBroadcast(node)) {
            // Convert a boolean vector into a wider vector.
            // Use int here.

            auto vt = Op.getValueType();
            bool scalarizedPack = false;
            if (vt.isVector() && vt.getSizeInBits() == 32) {
                unsigned int packedVal = 0;
                unsigned int laneW = vt.getScalarSizeInBits();
                for (int i = 0;
                     i < vt.getVectorElementCount().getKnownMinValue(); i++) {
                    auto oprd = node->getOperand(i);
                    ConstantSDNode* cn = cast<ConstantSDNode>(oprd);
                    unsigned int val = cn->getZExtValue();
                    val = val & (~0u >> (32 - laneW));
                    packedVal |= (val << (laneW*i));
                }
                if (tm_.canEncodeAsMOVI(MVT::i32, packedVal)) {
                    auto packedNode =
                        DAG.getConstant(packedVal, Op, MVT::i32);
                    return DAG.getNode(ISD::BITCAST, Op, vt, packedNode);
                }
            }

            if (elemVT == MVT::i1) {
                if (elemCount > 31) {
                    assert(elemCount % 32 == 0);
                    int intElemCount = elemCount/32;
                    TCEString wideOpName = "PACK32X";
                    wideOpName << intElemCount;
                    if (tm_.hasOperation(wideOpName)) {
                        return LowerBuildBooleanVectorVector(
                            Op, MVT::i32, elemCount, DAG);
                    }
                }
/* TODO: this does not work if u16 and i8 value types are not legal.
                if (elemCount > 15 && elemCount < 4096) {
                    assert(elemCount % 16 == 0);
                    int shortElemCount = elemCount/16;
                    TCEString wideOpName = "PACK16X";
                    wideOpName << shortElemCount;
                    if (tm_.hasOperation(wideOpName)) {
                        return LowerBuildBooleanVectorVector(
                            Op, MVT::i16, elemCount, DAG);
                    }
                }
                if (elemCount > 7 && elemCount < 2048) {
                    assert(elemCount % 8 == 0);
                    int charElemCount = elemCount/8;
                    TCEString wideOpName = "PACK8X";
                    wideOpName << charElemCount;
                    if (tm_.hasOperation(wideOpName)) {
                        return LowerBuildBooleanVectorVector(
                            Op, MVT::i8, elemCount, DAG);
                    }
                }
*/
                if (elemCount > 255) {
                    std::cerr << "Warning: Lowering Boolean vector build with"
                              << " more than 255 elements. LLVM does not"
                              << " support instructions with more than"
                              << " 255 operands so this will probably fail."
                              << " Add a pack instruction using wider lane"
                              << " width, such as PACK32X" << (elemCount/32)
                              << " into your architecture."
                              << std::endl;
                }
            } else { // Not boolean.
                // It makes no sense to have a zillion inputs to build_vector;
                // load from the constant pool instead.
                TCEString packName = "PACK";
                switch (elemVT.SimpleTy) {
                case MVT::i8:  packName << "8";  break;
                case MVT::i16: packName << "16"; break;
                case MVT::i32: packName << "32"; break;
                default: std::cerr << elemVT.SimpleTy << ", "; break;
                }
                packName << "X" << elemCount;
                // The PACK op was not found from the ADF, or it is too big.
                if (elemCount > 4 || !tm_.hasOperation(packName)) {
                    return SDValue(nullptr, 0);
                }
            }
        }

        if (canEncodeConstantOperands(*node)) {
            return Op;
        }
    }

    // TODO: Check if there are enough registers for the build_vector needed
    // by LLVM's register allocator.

    // There is an issue with build_vector being selected as the
    // all-register-operand version of PACK (i.e. PACKtrrrrrrrr). LLVM's
    // register allocator tries to allocate as many i32 registers as there
    // are register operands. For example with PACK8X64, the allocator tries
    // to reserve 64 i32 registers(!) and likely runs out of them.

    if (Application::verboseLevel() > 1) {
        std::cerr << "Expanding build_vector of "
                  << Op->getValueType(0).getEVTString()
                  << " = { ";
        for (unsigned i = 0; i < node->getNumOperands(); i++) {
            auto opdNode = node->getOperand(i).getNode();
            if (isa<ConstantSDNode>(opdNode)) {
                ConstantSDNode* cn = cast<ConstantSDNode>(opdNode);
                std::cerr << cn->getSExtValue() << " ";
            } else {
                std::cerr << "Reg ";
            }
        }
        std::cerr << "}" << std::endl;
    }

    // TODO: Expand to an insert_vector_elt chain rather than to the
    // expansion done by LLVM.

    // Expand to a load from the constant pool or to an in-stack fabrication.
    return SDValue(nullptr, 0);
}

SDValue
TCETargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {

    // ARM ripoff

    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    SDLoc dl(Op);
    EVT PtrVT =
        DAG.getTargetLoweringInfo().getPointerTy(
            getTargetMachine().createDataLayout(), 0);
    SDValue FR = DAG.getFrameIndex(getVarArgsFrameOffset(), PtrVT);
    const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
    return DAG.getStore(
        Op.getOperand(0), dl, FR, Op.getOperand(1), MachinePointerInfo(SV));
}

/**
 * Returns the preferred result type of comparison operations.
 *
 * @param VT Result type of the comparison operation.
 * @return Preferred comparison result type.
 */
EVT
TCETargetLowering::getSetCCResultType(
    const DataLayout &DL, LLVMContext &context, llvm::EVT VT) const {
    if (VT.isVector()) {
        EVT resultVectorType = getSetCCResultVT(VT);
        if (resultVectorType != MVT::INVALID_SIMPLE_VALUE_TYPE) {
            return resultVectorType;
        }
    }
    if (!VT.isVector()) return hasI1RC_ ? llvm::MVT::i1 : llvm::MVT::i32;
    return VT.changeVectorElementTypeToInteger();
}
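
// Illustrative behavior of getSetCCResultType (a sketch, not authoritative):
// a scalar i32 compare yields MVT::i1 when the target machine has a 1-bit
// register class (hasI1RC_), and MVT::i32 otherwise; a vector compare yields
// whatever getSetCCResultVT(VT) reports for the machine, falling back to the
// integer vector type of the same shape.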

#ifdef OLD_VECTOR_CODE
static
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) {
    EVT VT = Op.getValueType();
    DebugLoc dl = Op.getDebugLoc();
    SDValue Chain = Op.getOperand(0);

    // TODO: why is this here?
    if (VT == MVT::v4i32) {
        EVT ptrVT = Op.getOperand(1).getValueType();

        SDValue Ptr0, Ptr1, Ptr2, Ptr3;
        SDValue Imm0 = DAG.getConstant(0, ptrVT);
        SDValue Imm1 = DAG.getConstant(1, ptrVT);
        SDValue Imm2 = DAG.getConstant(2, ptrVT);
        SDValue Imm3 = DAG.getConstant(3, ptrVT);

        Ptr0 = Op.getOperand(1);
        Ptr1 = DAG.getNode(ISD::ADD, dl, ptrVT,
                           Op.getOperand(1), Imm1);
        Ptr2 = DAG.getNode(ISD::ADD, dl, ptrVT,
                           Op.getOperand(1), Imm2);
        Ptr3 = DAG.getNode(ISD::ADD, dl, ptrVT,
                           Op.getOperand(1), Imm3);
        SDValue Elt0 = DAG.getLoad(
            MVT::i32, dl, Chain, Ptr0, MachinePointerInfo(), false, false, 0);
        SDValue Elt1 = DAG.getLoad(
            MVT::i32, dl, Chain, Ptr1, MachinePointerInfo(), false, false, 0);
        SDValue Elt2 = DAG.getLoad(
            MVT::i32, dl, Chain, Ptr2, MachinePointerInfo(), false, false, 0);
        SDValue Elt3 = DAG.getLoad(
            MVT::i32, dl, Chain, Ptr3, MachinePointerInfo(), false, false, 0);
        // SDValue Result = DAG.getTargetInsertSubreg(0, dl, MVT::v4i32,
        //     DAG.getTargetInsertSubreg(1, dl, MVT::v4i32,
        //         DAG.getTargetInsertSubreg(2, dl, MVT::v4i32,
        //             DAG.getTargetInsertSubreg(3, dl, MVT::v4i32,
        //                 DAG.getUNDEF(MVT::v4i32),
        //                 Elt3), Elt2), Elt1), Elt0);

        // SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
        //                              Elt0, Elt1, Elt2, Elt3);

        SDValue Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4i32,
            DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4i32,
                DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4i32,
                    DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4i32,
                        DAG.getNode(ISD::UNDEF, dl, MVT::v4i32),
                        Elt0, Imm0),
                    Elt1, Imm1),
                Elt2, Imm2),
            Elt3, Imm3);

        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                            Elt0.getValue(1), Elt1.getValue(1),
                            Elt2.getValue(1), Elt3.getValue(1));

        SDValue Ops[] = {Result, Chain};

        return DAG.getMergeValues(Ops, 2, dl);
    }

    llvm_unreachable("Invalid LOAD to lower!");
}
#endif

std::pair<int, TCEString>
TCETargetLowering::getConstShiftNodeAndTCEOP(SDValue op) const {
    switch (op.getOpcode()) {
    case ISD::SRA:
        return std::make_pair(TCEISD::SRA_Const, TCEString("SHR"));
    case ISD::SRL:
        return std::make_pair(TCEISD::SRL_Const, TCEString("SHRU"));
    case ISD::SHL:
        return std::make_pair(TCEISD::SHL_Const, TCEString("SHL"));
    default:
        return std::make_pair(0, TCEString("unknown op"));
    }
}
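
// Usage sketch (illustrative): for an (sra x, 3) node this helper returns
// {TCEISD::SRA_Const, "SHR"}, and LowerShift below then probes the machine
// for an operation named "SHR3_32" to decide whether a 3-bit arithmetic
// shift exists as a native constant-shift operation.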

SDValue
TCETargetLowering::LowerShift(SDValue op, SelectionDAG& dag) const {

    auto shiftOpcodes = getConstShiftNodeAndTCEOP(op);
    int shiftOpcode = shiftOpcodes.first;
    assert(shiftOpcode && "Shift opcode not supported, should not be here");

    SDValue R = op.getOperand(0);
    SDValue Amt = op.getOperand(1);
    const DebugLoc& dl = op.getDebugLoc();
    std::set<unsigned long> supportedShifts;

    // Find all the constant shifts supported by the machine.
    for (int i = 1; i < 32; i++) {
        TCEString opName = shiftOpcodes.second; opName << i << "_32";
        if (tm_.hasOperation(opName)) {
            supportedShifts.insert(i);
        }
    }

    // Add also the 1-bit shift for add.
    // We should ALWAYS have an add, but let's check to be sure ;)
    if (tm_.hasOperation("ADD")) {
        supportedShifts.insert(1);
    }

    if (Amt.getOpcode() == ISD::Constant) {
        unsigned long amount = op.getConstantOperandVal(1);
        // If there is no correct-width shift, break it down into multiple
        // shifts.
        if (supportedShifts.find(amount) == supportedShifts.end()) {
            // Find the biggest suitable shift.
            for (auto i = supportedShifts.rbegin();
                 i != supportedShifts.rend(); i++) {
                if (amount > *i) {
                    auto shiftVal =
                        dag.getConstant(*i, op, Amt.getValueType());
                    auto remVal =
                        dag.getConstant(amount - *i, op, Amt.getValueType());
                    SDValue remaining = dag.getNode(
                        op.getOpcode(), op, op.getValueType(), R, remVal);
                    SDValue lowered = LowerShift(remaining, dag);
                    SDValue shift = dag.getNode(
                        shiftOpcode, op, op.getValueType(), lowered, shiftVal);
                    return shift;
                }
            }
        }
        return op;

    } else {
        unsigned Opc = op.getOpcode();
        switch (Opc) {
        case ISD::SRA:
            return ExpandLibCall(RTLIB::SRA_I32, op.getNode(), true, dag);
        case ISD::SRL:
            return ExpandLibCall(RTLIB::SRL_I32, op.getNode(), false, dag);
        case ISD::SHL:
            return ExpandLibCall(RTLIB::SHL_I32, op.getNode(), false, dag);
        default:
            std::cerr << "Invalid dynamic shift opcode" << std::endl;
        }
    }
    return op;
}
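
// Worked example for LowerShift (illustrative, not part of the original
// source): if the machine implements only SHL1_32 and SHL4_32, the constant
// shift (shl x, 6) is decomposed recursively using the largest supported
// shift amount below the requested one:
//     x << 6  ==>  ((x << 1) << 1) << 4
// The remainder (6 - 4 = 2) is lowered first, then the 4-bit shift is
// applied on top of it.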

SDValue
TCETargetLowering::lowerHWLoops(SDValue op, SelectionDAG &dag) const {
    if (cast<ConstantSDNode>(op->getOperand(1))->getZExtValue() !=
        Intrinsic::set_loop_iterations) {
        std::cerr << "Trying to lower an invalid hwloop instruction"
                  << std::endl;
        return op;
    }

    // If hwloop is the last instruction, let TDGen do the lowering.
    if (op->use_empty()) return op;

    // If we see a jump after hwloop, we have the hwloop
    // in the correct place. Let TDGen handle the hwloop lowering.
    if (op->use_size() > 1) {
        dag.dump();
        assert(false && "HWLoop should not have more than one Use");
    }
    auto linkNode = op->use_begin();
    auto linkOpc = linkNode->getOpcode();
    if (linkOpc == ISD::BR || linkOpc == ISD::HANDLENODE) {
        // hwloop follows a branch (or is the last instruction of the BB);
        // no action needed.
        return op;
    }

    // Sanity check for known patterns. Expected patterns:
    //   - hwloop -> TokenFactor -> BR (or)
    //   - hwloop -> TokenFactor
    if (linkOpc != ISD::TokenFactor || linkNode->use_size() > 1) {
        dag.dump();
        assert(false && "HWLoop loop pattern not implemented.");
    }

    // Create HWLOOP operands with a link to the ISD::BR node,
    // i.e. TokenFactor -> HWLOOP -> BR.
    SmallVector<SDValue, 8> ops;
    SmallVector<SDValue, 8> linkOps;
    bool replaceLinkNode = false;
    for (unsigned i = 0; i < op.getNumOperands(); i++) {
        // Swap the use lists of op and linkNode.
        if (i == 0) {
            // Set TokenFactor as the 1st operand.
            ops.push_back(SDValue(*linkNode, 0));

            // Create the operand list for linkNode.
            for (unsigned j = 0; j < linkNode->getNumOperands(); j++) {
                if (linkNode->getOperand(j) == op) {
                    linkOps.push_back(op.getOperand(i));
                } else {
                    linkOps.push_back(linkNode->getOperand(j));
                }
            }
            replaceLinkNode = true;
        } else {
            // Keep the rest of the operands as they are in hwloop.
            ops.push_back(op.getOperand(i));
        }
    }
    SDLoc dl(op);
    dag.ReplaceAllUsesWith(*linkNode, &op);
    auto Chain = dag.UpdateNodeOperands(op.getNode(), ArrayRef<SDValue>(ops));
    if (replaceLinkNode) {
        SDValue newLinkNode = dag.getNode(
            linkNode->getOpcode(), dl, MVT::Other,
            ArrayRef<SDValue>(linkOps));
        dag.ReplaceAllUsesWith(*linkNode, &newLinkNode);
    }
    return op;
}

/**
 * Lowers the FABS operation.
 *
 * TODO: Do not custom lower this if FABS is supported by the machine.
 */
SDValue
TCETargetLowering::lowerFABS(SDValue op, SelectionDAG &dag) const {
    MVT VT = op.getSimpleValueType();
    assert(VT == MVT::f32 || VT == MVT::f64);

    SDLoc DL(op);
    MVT castType = (VT == MVT::f32) ? MVT::i32 : MVT::i64;

    // Bitcast to the integer type.
    SDValue tmp = dag.getNode(ISD::BITCAST, DL, castType, op.getOperand(0));
    // Mask with the MSB set to zero.
    SDValue mask = (VT == MVT::f32)
        ? dag.getConstant(0x7fffffff, DL, MVT::i32)
        : dag.getConstant(0x7fffffffffffffff, DL, MVT::i64);
    SDValue tmp2 = dag.getNode(ISD::AND, DL, castType, tmp, mask);
    // Bitcast back to the floating-point type.
    return dag.getNode(ISD::BITCAST, DL, VT, tmp2);
}
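
// Worked example for lowerFABS (illustrative): fabsf(-2.0f) operates on the
// f32 bit pattern 0xC0000000; ANDing it with the mask 0x7FFFFFFF clears the
// sign bit and yields 0x40000000, i.e. +2.0f, without any floating-point
// arithmetic.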

/**
 * Handles custom operation lowerings.
 */
SDValue
TCETargetLowering::LowerOperation(SDValue op, SelectionDAG& dag) const {
    switch (op.getOpcode()) {
    case ISD::TRAP:          return LowerTRAP(op, dag);
    case ISD::GlobalAddress: return LowerGLOBALADDRESS(op, dag);
    case ISD::BlockAddress:  return LowerBlockAddress(op, dag);
    case ISD::VASTART:       return LowerVASTART(op, dag);
    case ISD::ConstantPool:  return LowerCONSTANTPOOL(op, dag);
    case ISD::Constant:      return LowerConstant(op, dag);
    case ISD::BUILD_VECTOR:  return LowerBuildVector(op, dag);
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:           return LowerShift(op, dag);
    case ISD::LOAD:          return lowerExtOrBoolLoad(op, dag);
    case ISD::INTRINSIC_VOID:
        return lowerHWLoops(op, dag);
    case ISD::DYNAMIC_STACKALLOC: {
        assert(false && "Dynamic stack allocation not yet implemented.");
    }
    case ISD::FABS:          return lowerFABS(op, dag);
#ifdef OLD_VECTOR_CODE
    case ISD::LOAD:          return LowerLOAD(op, dag);
#endif
    }
    op.getNode()->dump(&dag);
    assert(0 && "Custom lowerings not implemented!");
}

SDValue
TCETargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
    SelectionDAG &DAG = DCI.DAG;
    SDLoc dl(N);
    switch (N->getOpcode()) {
    default:
        break;
    case ISD::BRCOND: {
        SDValue Cond = N->getOperand(1);
        SDValue Target = N->getOperand(2);
        // Corner case for the decrement -> setcc -> brcond link.
        if (Cond.getOpcode() == ISD::SETCC)
            Cond = Cond.getOperand(0);
        if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
            cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
            Intrinsic::loop_decrement) {
            /// Replace the decrement use chain with its predecessor.
            /// The decrement should connect to the BRCOND node directly or
            /// via a TokenFactor. If it is connected via a TokenFactor, move
            /// the decrement close to BRCOND by updating the chain.
            auto chain = N->getOperand(0);
            // Correct form; no action needed.
            if (chain.getNode() == Cond.getNode()) {
                DAG.SelectNodeTo(N, TCE::LJUMP, MVT::Other,
                                 N->getOperand(2), Cond->getOperand(0));
            } else {
                assert((chain.getOpcode() == ISD::TokenFactor) &&
                       "chain to brcond is not TokenFactor.");
                assert((N->use_begin()->getOpcode() == ISD::BR) &&
                       "brcond is not connected to br.");
                SmallVector<SDValue, 8> Ops;
                bool hasDecrement = false;
                for (unsigned i = 0, e = chain->getNumOperands(); i != e;
                     ++i) {
                    if (chain->getOperand(i).getNode() ==
                        Cond.getNode()) {
                        hasDecrement = true;
                        Ops.push_back(Cond->getOperand(0));
                    } else {
                        Ops.push_back(chain->getOperand(i));
                    }
                }
                assert(hasDecrement &&
                       "Unable to find Chain for loop decrement");
                auto newChain = DAG.getNode(
                    ISD::TokenFactor, SDLoc(chain), MVT::Other, Ops);
                DAG.ReplaceAllUsesOfValueWith(chain, newChain);

                // Custom ISel for LJUMP.
                DAG.UpdateNodeOperands(
                    N, newChain, N->getOperand(1), N->getOperand(2));
                DAG.SelectNodeTo(
                    N, TCE::LJUMP, MVT::Other, N->getOperand(2),
                    newChain);
            }
        }
    }
    }
    return SDValue();
}
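
// Illustrative DAG rewrite performed by the BRCOND combine above (a sketch,
// assuming the hardware-loop intrinsic pattern described in lowerHWLoops):
//
//     loop_decrement --> TokenFactor --> BRCOND --> BR
//
// is reconnected so that the decrement's incoming chain feeds a target
// LJUMP node directly:
//
//     TokenFactor(other chains) --> LJUMP(target, chain)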

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
TCETargetLowering::ConstraintType
TCETargetLowering::getConstraintType(StringRef Constraint) const {
    if (Constraint.size() == 1) {
        switch (Constraint[0]) {
        default: break;
        case 'r': return C_RegisterClass;
        }
    }
    return TargetLowering::getConstraintType(Constraint);
}

const TargetRegisterClass*
TCETargetLowering::getVectorRegClassForInlineAsmConstraint(
    const TargetRegisterInfo* TRI,
    MVT VT) const {

    if (!VT.isVector()) return nullptr;

    const TargetRegisterClass* bestVRC = nullptr;
    // Find the smallest RF, using the stack spill size as a register size
    // indication.
    for (unsigned i = 0U; i < TRI->getNumRegClasses(); i++) {
        auto vrc = TRI->getRegClass(i);
        if (TRI->isTypeLegalForClass(*vrc, VT) &&
            (!bestVRC || vrc->MC->RegsSize < bestVRC->MC->RegsSize)) {
            bestVRC = vrc;
        }
    }
    return bestVRC;
}

1568/**
1569 * Returns proper register class for given value type.
1570 *
1571 * @param Constraint A constraint defined for an inline asm operation operand.
1572 * @return Proper register class for the operand value type.
1573 */
1574std::pair<unsigned, const TargetRegisterClass *>
1576 const TargetRegisterInfo *TRI,
1577 StringRef Constraint, MVT VT) const {
1578 if (Constraint.size() == 1) {
1579 // check if value type is a vector and return associated reg class
1580 std::pair<unsigned, const TargetRegisterClass *> rcPair =
1582
1583 switch (Constraint[0]) {
1584 case 'r':
1585 // if found associated vector reg class
1586 if (rcPair.second != NULL) {
1587 return rcPair;
1588 }
1589 }
1590 }
1591
1592 bool isPhysicalRegister = Constraint.size() > 3
1593 && Constraint.front() == '{' && Constraint.back() == '}';
1594
1595 const TargetRegisterClass* vrc = nullptr;
1596 if (Constraint.size() == 1) {
1597 switch (Constraint[0]) {
1598 case 'r':
1599 // Prefer vector RFs for vector types and then try
1600 // scalar RFs.
1602 if (vrc) return std::make_pair(0U, vrc);
1603
1604 switch (VT.getSizeInBits()) {
1605 case 8:
1606 case 16:
1607 case 32:
1608 case 64:
1609 return std::make_pair(0U, &DEFAULT_REG_CLASS);
1610 default:
1611 break;
1612 }
1613 return std::make_pair(0U, nullptr); // return error.
1614 // TODO: this should be some other char. But change it in devel64b
1615 case 's':
1616 return std::make_pair(0U, &TCE::R64RegsRegClass);
1617 case 'f':
1618 if (VT == MVT::f32) {
1619 return std::make_pair(0U, &TCE::FPRegsRegClass);
1620 }
1621#ifdef TARGET64BIT
1622 case 'd':
1623 return std::make_pair(0U, &TCE::R64DFPRegsRegClass);
1624#endif
1625 }
1626 } else if (isPhysicalRegister) {
1627 // Constraint = {<RF-name>.<Register-index>}
1628 const std::string regName = Constraint.substr(1, Constraint.size()-2).str();
1629 unsigned regId = tm_.llvmRegisterId(regName);
1630 if (regId == TCE::NoRegister) {
1631 // No such register. Return error.
1632 return std::make_pair(0, nullptr);
1633 }
1634
1635 // In case the reg is boolean register via local register
1636 // variable (ie. "register int foo asm("BOOL.1") = ...").
1637 if (TCE::R1RegsRegClass.contains(regId)) {
1638 return std::make_pair(regId, &TCE::R1RegsRegClass);
1639 }
1640 if (TCE::GuardRegsRegClass.contains(regId)) {
1641 return std::make_pair(regId, &TCE::GuardRegsRegClass);
1642 }
1643
1644 return std::make_pair(regId, TRI->getMinimalPhysRegClass(regId, VT));
1645 }
1646 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1647}
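// Illustrative usage sketch ("RF.5" is a hypothetical register name; the
// "BOOL.1" form mirrors the comment above): this hook parses constraints of
// the form {<RF-name>.<Register-index>}, which typically originate from
// register variables in C code:
// @code
//   register int flag asm ("BOOL.1") = 1;  // boolean RF via register variable
//   register int v asm ("RF.5");           // pins v to hypothetical RF.5
// @endcode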
1648
1649// For an invalid constraint, such as an unsupported immediate, add nothing to Ops.
1650void
1651TCETargetLowering::LowerAsmOperandForConstraint(
1652 SDValue Op,
1653 std::string& Constraint,
1654 std::vector<SDValue>& Ops,
1655 SelectionDAG& DAG) const {
1656
1657 if (Constraint.length() == 1) {
1658 switch (Constraint[0]) {
1659 case 'i':
1660 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
1661 if (!canEncodeImmediate(*C)) {
1662 return;
1663 }
1664 }
1665 break;
1666 default:
1667 break;
1668 }
1669 }
1670
1671 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
1672}
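// Illustrative sketch ("op" is a hypothetical mnemonic): with the 'i'
// constraint, a constant that fails canEncodeImmediate() is not appended to
// Ops, so LLVM reports an invalid inline asm operand instead of silently
// emitting an unencodable immediate:
// @code
//   asm volatile ("op %0" : : "i" (42)); // accepted only if 42 is encodable
// @endcode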
1673
1674std::vector<unsigned> TCETargetLowering::
1675getRegClassForInlineAsmConstraint(const std::string &Constraint,
1676 EVT VT) const {
1677 if (Constraint.size() != 1)
1678 return std::vector<unsigned>();
1679
1680 switch (Constraint[0]) {
1681 default: break;
1682 case 'r':
1683 // TODO: WHAT TO DO WITH THESE?
1684 return std::vector<unsigned>(1,0);
1685
1686 }
1687
1688 return std::vector<unsigned>();
1689}
1690
1691bool
1692TCETargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
1693 return false;
1694}
1695
1696
1697/*
1698bool
1699 TCETargetLowering::allowsMisalignedMemoryAccesses(EVT, unsigned, unsigned, MachineMemOperand::Flags, bool*) const {
1700 /// @todo This commented area and the whole function is probably not
1701 /// needed anymore. The base class version returns false as default.
1702 return false;
1703}
1704*/
1705
1706/**
1707 * Returns true if all operands of the SDNode are constants or undefined.
1708 */
1709bool
1710TCETargetLowering::isConstantOrUndefBuild(const SDNode& node) const {
1711 for (unsigned i = 0; i < node.getNumOperands(); i++) {
1712 auto opc = node.getOperand(i)->getOpcode();
1713 if (opc != ISD::Constant && opc != ISD::UNDEF) {
1714 return false;
1715 }
1716 }
1717 return true;
1718}
1719
1720/**
1721 * Checks whether the constant operands used by the SDNode can be encoded
1722 * as immediates on the target machine.
1723 */
1724bool
1725TCETargetLowering::canEncodeConstantOperands(const SDNode& node) const {
1726 for (unsigned i = 0; i < node.getNumOperands(); i++) {
1727 if (node.getOperand(i)->getOpcode() != ISD::Constant) continue;
1728 ConstantSDNode* cn =
1729 cast<ConstantSDNode>(node.getOperand(i).getNode());
1730 if (!canEncodeImmediate(*cn)) return false;
1731 }
1732 return true;
1733}
1734
1735/**
1736 * Checks whether the constant can generally be encoded as an immediate
1737 * on the target machine.
1738 */
1739bool
1740TCETargetLowering::canEncodeImmediate(const ConstantSDNode& node) const {
1741 int64_t val = node.getSExtValue();
1742 MVT vt = node.getSimpleValueType(0);
1743
1744 // We accept here only constants that can be materialized during
1745 // instruction selection in some way, and this must be achievable by
1746 // the lowest common denominator.
1747
1748 // can encode as MOVI?
1749 // Assuming here, that the immediate can be transported to any target
1750 // machine operation.
1751 if (tm_.canEncodeAsMOVI(vt, val))
1752 return true;
1753
1754 // can encode as immediate to operation
1755 // TODO?
1756
1757 // can encode as immToOp when the user is known to be selected
1758 // to a certain target instruction?
1759
1760 // can encode through ISEL transformation?
1761 if (tm_.canMaterializeConstant(*node.getConstantIntValue()))
1762 return true;
1763
1764 return false;
1765}
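// Illustrative sketch (the 16-bit immediate width is an assumption for the
// example): on a machine whose widest transportable immediate is 16 bits,
// the checks above behave roughly as follows:
// @code
//   // canEncodeAsMOVI(MVT::i32, 0x7fff)     -> true: a plain MOVI suffices.
//   // canEncodeAsMOVI(MVT::i32, 0x12345678) -> false: too wide for MOVI.
//   // canMaterializeConstant(0x12345678)    -> possibly true through an
//   //     ISel transformation; otherwise the constant-pool path is used.
// @endcode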
1766
1767/**
1768 * Returns true if address values should be loaded from the constant pool
1769 * due to limited immediate support.
1770 *
1771 */
1772bool
1773TCETargetLowering::shouldLoadFromConstantPool(unsigned addressSpace) const {
1774 if (loadGAFromConstantPool_.count(addressSpace) == 0) {
1775 // Default behavior for unspecified address spaces.
1777 return loadGAFromConstantPool_.at(0);
1778 }
1779
1780 return loadGAFromConstantPool_.at(addressSpace);
1781}
1782
1783/**
1784 * Returns true if the target machine has a register class for i1 types.
1785 */
1786bool
1787TCETargetLowering::hasI1RegisterClass() const {
1788 if (TCE::R1RegsRegClass.getNumRegs() == 0) return false;
1789
1790 // TDGen generates a dummy register class for machines without boolean
1791 // RFs.
1792 if (TCE::R1RegsRegClass.getNumRegs() == 1) {
1793 std::string regName = tm_.rfName(TCE::R1RegsRegClass.getRegister(0));
1794 if (regName.find("dummy") != std::string::npos) return false;
1795 }
1796
1797 return true;
1798}
1799
1800/**
1801 * Checks whether the FP constant's bit pattern fits in the machine's immediates.
1802 */
1803bool
1804TCETargetLowering::isFPImmLegal(
1805 const APFloat& apf, EVT VT, bool forCodeSize) const {
1806 if (VT==MVT::f32 || VT==MVT::f16) {
1807 return tm_.canEncodeAsMOVF(apf);
1808 }
1809 return false;
1810}
1811
1812bool
1813TCETargetLowering::isBroadcast(SDNode* n) {
1814 if (n->getOpcode() != ISD::BUILD_VECTOR) {
1815 return false;
1816 }
1817 SDValue val = n->getOperand(0);
1818 unsigned operandCount = n->getNumOperands();
1819 for (unsigned i = 1; i < operandCount; i++) {
1820 SDValue val2 = n->getOperand(i);
1821 SDNode* node2 = val2.getNode();
1822 if (node2->getOpcode() != ISD::UNDEF) {
1823 if (val2 != val)
1824 return false;
1825 }
1826 }
1827 return true;
1828}
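// Illustrative sketch (x and y stand for arbitrary distinct SDValues): for
// v4i32 BUILD_VECTOR nodes the predicate above accepts splats and ignores
// undef lanes:
// @code
//   // BUILD_VECTOR x, x, undef, x  -> true  (undef lanes are skipped)
//   // BUILD_VECTOR x, y, x, x      -> false (lane 1 differs from lane 0)
// @endcode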
1829
1830
1831// TODO: This is copy-pasted from LegalizeDAG because the
1832// routine in LegalizeDAG is not public.
1833SDValue
1834 TCETargetLowering::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
1835 bool isSigned, SelectionDAG &DAG) const {
1836
1837 TargetLowering::ArgListTy Args;
1838 TargetLowering::ArgListEntry Entry;
1839 for (const SDValue &Op : Node->op_values()) {
1840 EVT ArgVT = Op.getValueType();
1841 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
1842 Entry.Node = Op;
1843 Entry.Ty = ArgTy;
1844 Entry.IsSExt = shouldSignExtendTypeInLibCall(ArgVT, isSigned);
1845 Entry.IsZExt = !shouldSignExtendTypeInLibCall(ArgVT, isSigned);
1846 Args.push_back(Entry);
1847 }
1848 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
1849 getPointerTy(DAG.getDataLayout(),0));
1850
1851 EVT RetVT = Node->getValueType(0);
1852 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
1853
1854 // By default, the input chain to this libcall is the entry node of the
1855 // function. If the libcall is going to be emitted as a tail call then
1856 // TLI.isUsedByReturnOnly will change it to the right chain if the return
1857 // node which is being folded has a non-entry input chain.
1858 SDValue InChain = DAG.getEntryNode();
1859
1860 // isTailCall may be true since the callee does not reference caller stack
1861 // frame. Check if it's in the right position and that the return types match.
1862 SDValue TCChain = InChain;
1863 const Function &F = DAG.getMachineFunction().getFunction();
1864 bool isTailCall =
1865 isInTailCallPosition(DAG, Node, TCChain) &&
1866 (RetTy == F.getReturnType() || F.getReturnType()->isVoidTy());
1867 if (isTailCall)
1868 InChain = TCChain;
1869
1870 TargetLowering::CallLoweringInfo CLI(DAG);
1871 bool signExtend = shouldSignExtendTypeInLibCall(RetVT, isSigned);
1872 CLI.setDebugLoc(SDLoc(Node))
1873 .setChain(InChain)
1874 .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee,
1875 std::move(Args))
1876 .setTailCall(isTailCall)
1877 .setSExtResult(signExtend)
1878 .setZExtResult(!signExtend)
1879 .setIsPostTypeLegalization(true);
1880
1881 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
1882
1883 if (!CallInfo.second.getNode()) {
1884 // It's a tailcall, return the chain (which is the DAG root).
1885 return DAG.getRoot();
1886 }
1887
1888 return CallInfo.first;
1889}
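// Illustrative usage sketch (whether a given TTA machine needs this depends
// on its operation set): a custom lowering hook could expand an unsupported
// operation into a libcall, e.g.
// @code
//   // inside a lowering routine, for a node known to need a library call:
//   // return ExpandLibCall(RTLIB::SDIV_I32, Op.getNode(), true, DAG);
// @endcode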
1890
1891void TCETargetLowering::ReplaceNodeResults(
1892 SDNode* node, SmallVectorImpl<SDValue>& Results,
1893 SelectionDAG& DAG) const {
1894 auto fnName = DAG.getMachineFunction().getName().str();
1895
1896 SDValue shiftedVal;
1897 SDValue truncAnd;
1898 if (node->getOpcode() == ISD::LOAD) {
1899 auto lsdn = dyn_cast<LoadSDNode>(node);
1900 if (lsdn == nullptr) {
1901 std::cerr << "Error: null LoadSDNode!" << std::endl;
1902 return;
1903 }
1904 #ifdef LLVM_OLDER_THAN_16
1905 if (lsdn->getAlignment() < 2 &&
1906 #else
1907 if (lsdn->getAlign() < 2 &&
1908 #endif
1909 lsdn->getMemoryVT() != MVT::i8 && lsdn->getMemoryVT() != MVT::i1) {
1910 assert(0 && "Cannot lower 16-bit memory op with only one byte alignment");
1911 }
1912
1913 auto chain = node->getOperand(0);
1914
1915 SDValue load;
1916 SDValue lowBits;
1917 #ifdef LLVM_OLDER_THAN_16
1918 if (lsdn->getAlignment() >= 4) {
1919 #else
1920 if (lsdn->getAlign() >= 4) {
1921 #endif
1922 load = DAG.getLoad(
1923 MVT::i32, node, chain, lsdn->getBasePtr(), MachinePointerInfo());
1924 } else {
1925 auto alignedAddr =
1926 DAG.getNode(
1927 ISD::AND, node, MVT::i32, lsdn->getBasePtr(),
1928 DAG.getConstant(-4l, node, MVT::i32));
1929
1930 auto lowBytes = DAG.getNode(
1931 ISD::AND, node, MVT::i32, lsdn->getBasePtr(),
1932 DAG.getConstant(3l, node, MVT::i32));
1933
1934 lowBits = DAG.getNode(
1935 ISD::SHL, node, MVT::i32, lowBytes,
1936 DAG.getConstant(3l, node, MVT::i32));
1937
1938 load = DAG.getLoad(
1939 MVT::i32, node, chain, alignedAddr, MachinePointerInfo());
1940 }
1941
1942 // TODO: breaks with 64 bits!
1943 // TODO: also breaks with 16-bit floats?
1944 MVT vt = node->getSimpleValueType(0);
1945 if (vt == MVT::i32) {
1946 assert(0 && "Result i32? this should be extload?");
1947 Results.push_back(SDValue(load));
1948 Results.push_back(SDValue(load.getNode(),1));
1949 return;
1950 }
1951
1952 SDValue finalVal;
1953 if (lsdn->getExtensionType() == ISD::ZEXTLOAD) {
1954 #ifdef LLVM_OLDER_THAN_16
1955 shiftedVal = lsdn->getAlignment() < 4 ?
1956 #else
1957 shiftedVal = lsdn->getAlign() < 4 ?
1958 #endif
1959 DAG.getNode(ISD::SRA, node, MVT::i32, load, lowBits):
1960 load;
1961
1962 if (lsdn->getMemoryVT() == MVT::i1) {
1963 finalVal = DAG.getNode(
1964 ISD::AND, node, MVT::i32, shiftedVal,
1965 DAG.getConstant(1l, node, MVT::i32));
1966 } else if (lsdn->getMemoryVT() == MVT::i8) {
1967 finalVal = DAG.getNode(
1968 ISD::AND, node, MVT::i32, shiftedVal,
1969 DAG.getConstant(255l, node, MVT::i32));
1970 } else {
1971 // TODO: 64-bit port needs to add option for 32-bit here.
1972 assert(0 && "Wrong memory vt in zextload!");
1973 }
1974 } else if (lsdn->getExtensionType() == ISD::SEXTLOAD) {
1975 if (lsdn->getMemoryVT() == MVT::i1) {
1976 auto shiftsLeft =
1977 DAG.getNode(ISD::SUB, node, MVT::i32,
1978 DAG.getConstant(31l, node, MVT::i32),lowBits);
1979 auto shiftUp = DAG.getNode(
1980 ISD::SHL, node, MVT::i32, load, shiftsLeft);
1981 finalVal = DAG.getNode(
1982 ISD::SRA, node, MVT::i32, shiftUp,
1983 DAG.getConstant(31l, node, MVT::i32));
1984 } else if (lsdn->getMemoryVT() == MVT::i8) {
1985 auto shiftsLeft =
1986 DAG.getNode(ISD::SUB, node, MVT::i32,
1987 DAG.getConstant(24l, node, MVT::i32),lowBits);
1988 auto shiftUp = DAG.getNode(
1989 ISD::SHL, node, MVT::i32, load, shiftsLeft);
1990 finalVal = DAG.getNode(
1991 ISD::SRA, node, MVT::i32, shiftUp,
1992 DAG.getConstant(24l, node, MVT::i32));
1993 } else {
1994 // TODO: 64-bit port needs to add option for 32-bit here.
1995 assert(0 && "Wrong memory vt in sextload!");
1996 }
1997 } else { // anyext/noext.
1998 #ifdef LLVM_OLDER_THAN_16
1999 finalVal = lsdn->getAlignment() < 4 ?
2000 #else
2001 finalVal = lsdn->getAlign() < 4 ?
2002 #endif
2003 DAG.getNode(ISD::SRA, node, MVT::i32, load, lowBits):
2004 load;
2005 }
2006
2007 SDValue rv;
2008 if (vt == MVT::i16) {
2009 rv = DAG.getAnyExtOrTrunc(finalVal, node, MVT::i16);
2010 } else if (vt == MVT::i8) {
2011 rv = DAG.getAnyExtOrTrunc(finalVal, node, MVT::i8);
2012 } else if (vt == MVT::i1) {
2013 rv = DAG.getAnyExtOrTrunc(finalVal, node, MVT::i1);
2014 } else {
2015 assert(0 && "Wrong vt in load lowering!");
2016 }
2017
2018 Results.push_back(rv);
2019 Results.push_back(SDValue(load.getNode(),1));
2020 } else {
2021 assert(false && "ReplaceNodeResults not load!");
2022 }
2023}
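// Worked example of the emulation above (little-endian; the address and
// memory contents are illustrative): an i8 zextload from address 0x1003
// when the 32-bit word at 0x1000 holds 0xAABBCCDD:
// @code
//   // alignedAddr = 0x1003 & -4       = 0x1000
//   // lowBits     = (0x1003 & 3) << 3 = 24
//   // load        = 0xAABBCCDD
//   // shiftedVal  = load >> 24 (SRA)  = 0xFFFFFFAA
//   // finalVal    = shiftedVal & 255  = 0x000000AA, the byte at 0x1003
// @endcode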
2024
2025/**
2026 * Lowers an 8- or 16-bit extending load into a 32-bit little-endian load.
2027 */
2028SDValue TCETargetLowering::lowerExtOrBoolLoad(
2029 SDValue op,
2030 SelectionDAG& DAG) const {
2031
2032 auto lsdn = dyn_cast<LoadSDNode>(op.getNode());
2033 if (lsdn == nullptr) {
2034 assert(false && "Not a lodsdnode on LowerExtLoad!");
2035 }
2036
2037 auto chain = op.getOperand(0);
2038 SDValue alignedAddr;
2039 SDValue lowBits;
2040
2041 #ifdef LLVM_OLDER_THAN_16
2042 if (lsdn->getAlignment() >= 4) {
2043 #else
2044 if (lsdn->getAlign() >= 4) {
2045 #endif
2046 alignedAddr = lsdn->getBasePtr();
2047 lowBits = DAG.getConstant(0l, op, MVT::i32);
2048 } else {
2049 alignedAddr = DAG.getNode(
2050 ISD::AND, op, MVT::i32, lsdn->getBasePtr(),
2051 DAG.getConstant(-4l, op, MVT::i32));
2052
2053 auto lowBytes = DAG.getNode(
2054 ISD::AND, op, MVT::i32, lsdn->getBasePtr(),
2055 DAG.getConstant(3l, op, MVT::i32));
2056
2057 lowBits = DAG.getNode(
2058 ISD::SHL, op, MVT::i32, lowBytes,
2059 DAG.getConstant(3l, op, MVT::i32));
2060 }
2061
2062 auto load = DAG.getLoad(
2063 MVT::i32, op, chain, alignedAddr, MachinePointerInfo());
2064
2065 // This is little-endian code; big-endian needs a different sequence.
2066 if (lsdn->getExtensionType() == ISD::ZEXTLOAD) {
2067 #ifdef LLVM_OLDER_THAN_16
2068 auto shiftedValue = lsdn->getAlignment() < 4 ?
2069 #else
2070 auto shiftedValue = lsdn->getAlign() < 4 ?
2071 #endif
2072 DAG.getNode(ISD::SRA, op, MVT::i32, load, lowBits) :
2073 load;
2074 if (lsdn->getMemoryVT() == MVT::i16) {
2075 #ifdef LLVM_OLDER_THAN_16
2076 assert(lsdn->getAlignment() >= 2 &&
2077 #else
2078 assert(lsdn->getAlign() >= 2 &&
2079 #endif
2080 "Cannot (yet?) emulate a 16-bit load which has 1-byte alignment. "
2081 " 16-bit memory operations needed to compile this code." );
2082 std::cerr << "\t\tSource is 16 bits." << std::endl;
2083 auto zext = DAG.getNode(
2084 ISD::AND, op, MVT::i32, shiftedValue,
2085 DAG.getConstant(65535l, op, MVT::i32));
2086 return zext;
2087 } else if (lsdn->getMemoryVT() == MVT::i8) {
2088 auto zext = DAG.getNode(
2089 ISD::AND, op, MVT::i32, shiftedValue,
2090 DAG.getConstant(255l, op, MVT::i32));
2091 return zext;
2092 } else if (lsdn->getMemoryVT() == MVT::i1) {
2093 auto zext = DAG.getNode(
2094 ISD::AND, op, MVT::i32, shiftedValue,
2095 DAG.getConstant(1l, op, MVT::i32));
2096 return zext;
2097 } else {
2098 assert(false && "Unknown data type on LowerSExtLoad!");
2099 }
2100 }
2101 if (lsdn->getExtensionType() == ISD::SEXTLOAD) {
2102
2103 // shift left to get it to upper bits, then arithmetic right.
2104 if (lsdn->getMemoryVT() == MVT::i16) {
2105 #ifdef LLVM_OLDER_THAN_16
2106 auto shiftsLeft = lsdn->getAlignment() < 4 ?
2107 #else
2108 auto shiftsLeft = lsdn->getAlign() < 4 ?
2109 #endif
2110 DAG.getNode(ISD::SUB, op, MVT::i32,
2111 DAG.getConstant(16l, op, MVT::i32),
2112 lowBits) :
2113 DAG.getConstant(16l, op, MVT::i32);
2114 auto shiftUp = DAG.getNode(
2115 ISD::SHL, op, MVT::i32, load, shiftsLeft);
2116 auto shiftDown = DAG.getNode(
2117 ISD::SRA, op, MVT::i32, shiftUp,
2118 DAG.getConstant(16l, op, MVT::i32));
2119 return shiftDown;
2120 } else if (lsdn->getMemoryVT() == MVT::i8) {
2121 #ifdef LLVM_OLDER_THAN_16
2122 auto shiftsLeft = lsdn->getAlignment() < 4 ?
2123 #else
2124 auto shiftsLeft = lsdn->getAlign() < 4 ?
2125 #endif
2126 DAG.getNode(ISD::SUB, op, MVT::i32,
2127 DAG.getConstant(24l, op, MVT::i32),
2128 lowBits) :
2129 DAG.getConstant(24l, op, MVT::i32);
2130 auto shiftUp =
2131 DAG.getNode(ISD::SHL, op, MVT::i32, load, shiftsLeft);
2132 auto shiftDown = DAG.getNode(
2133 ISD::SRA, op, MVT::i32, shiftUp,
2134 DAG.getConstant(24l, op, MVT::i32));
2135 return shiftDown;
2136 } else if (lsdn->getMemoryVT() == MVT::i1) {
2137 #ifdef LLVM_OLDER_THAN_16
2138 auto shiftsLeft = lsdn->getAlignment() < 4 ?
2139 #else
2140 auto shiftsLeft = lsdn->getAlign() < 4 ?
2141 #endif
2142 DAG.getNode(ISD::SUB, op, MVT::i32,
2143 DAG.getConstant(31l, op, MVT::i32),
2144 lowBits) :
2145 DAG.getConstant(31l, op, MVT::i32);
2146
2147 auto shiftUp =
2148 DAG.getNode(ISD::SHL, op, MVT::i32, load, shiftsLeft);
2149 auto shiftDown = DAG.getNode(
2150 ISD::SRA, op, MVT::i32, shiftUp,
2151 DAG.getConstant(31l, op, MVT::i32));
2152 return shiftDown;
2153 } else {
2154 assert(false && "Unknown data type on Lower(Z)ExtLoad!");
2155 }
2156 }
2157
2158 // anyext?
2159 if (lsdn->getExtensionType() == ISD::EXTLOAD) {
2160 #ifdef LLVM_OLDER_THAN_16
2161 auto shiftedValue = lsdn->getAlignment() < 4 ?
2162 #else
2163 auto shiftedValue = lsdn->getAlign() < 4 ?
2164 #endif
2165 DAG.getNode(ISD::SRA, op, MVT::i32, load, lowBits) :
2166 load;
2167 // shiftedValue already covers the aligned (no-shift) case.
2168 return shiftedValue;
2169 } else {
2170 // normal, not-extload.
2171 MVT vt = op->getSimpleValueType(0);
2172 if (vt == MVT::i1 && lsdn->getMemoryVT() == MVT::i1) {
2173 SDValue trunc = DAG.getAnyExtOrTrunc(load, op, MVT::i1);
2174 return trunc;
2175 }
2176
2177 assert(false && "Should not be here, non-ext-load");
2178 }
2179 return SDValue();
2180}
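// Worked example of the sext path above (little-endian; the address and
// memory contents are illustrative): an i8 sextload from address 0x1001
// when the word at 0x1000 holds 0xAABBCCDD, so the loaded byte is 0xCC:
// @code
//   // lowBits    = (0x1001 & 3) << 3       = 8
//   // shiftsLeft = 24 - 8                  = 16
//   // shiftUp    = 0xAABBCCDD << 16        = 0xCCDD0000
//   // result     = 0xCCDD0000 >> 24 (SRA)  = 0xFFFFFFCC, i.e. -52
// @endcode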