OpenASIP  2.0
TCEISelLowering.cc
Go to the documentation of this file.
1 /*
2  Copyright (c) 2002-2013 Tampere University.
3 
4  This file is part of TTA-Based Codesign Environment (TCE).
5 
6  Permission is hereby granted, free of charge, to any person obtaining a
7  copy of this software and associated documentation files (the "Software"),
8  to deal in the Software without restriction, including without limitation
9  the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  and/or sell copies of the Software, and to permit persons to whom the
11  Software is furnished to do so, subject to the following conditions:
12 
13  The above copyright notice and this permission notice shall be included in
14  all copies or substantial portions of the Software.
15 
16  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22  DEALINGS IN THE SOFTWARE.
23  */
24 /**
 25  * @file TCEISelLowering.cc
26  *
27  * Implementation of TCETargetLowering class.
28  *
29  * @author Veli-Pekka Jääskeläinen 2007 (vjaaskel-no.spam-cs.tut.fi)
30  * @author Mikael Lepistö 2009 (mikael.lepisto-no.spam-tut.fi)
31  * @author Pekka Jääskeläinen 2010
32  * @author Heikki Kultala 2011-2012 (heikki.kultala-no.spam-tut.fi)
33  */
34 
35 #include <assert.h>
36 #include <string>
37 #include "tce_config.h"
38 #include <llvm/IR/Function.h>
39 #include <llvm/IR/DerivedTypes.h>
40 #include <llvm/IR/Intrinsics.h>
41 #include <llvm/IR/CallingConv.h>
42 #include <llvm/CodeGen/TargetLowering.h>
43 #include <llvm/CodeGen/CallingConvLower.h>
44 #include <llvm/CodeGen/SelectionDAG.h>
45 #include <llvm/CodeGen/MachineFrameInfo.h>
46 #include <llvm/CodeGen/MachineRegisterInfo.h>
47 #include <llvm/CodeGen/MachineInstrBuilder.h>
48 #include <llvm/Support/raw_ostream.h>
49 
50 #include <llvm/Target/TargetLoweringObjectFile.h>
51 
52 //#include <llvm/Config/config.h>
53 
54 #include "TCEPlugin.hh"
55 #include "TCERegisterInfo.hh"
56 #include "TCETargetMachine.hh"
57 #include "TCETargetObjectFile.hh"
58 #include "TCESubtarget.hh"
59 #include "TCEISelLowering.hh"
60 #include "tce_config.h"
61 #include "LLVMTCECmdLineOptions.hh"
62 #include "Application.hh"
63 #include "Machine.hh"
64 #include "AddressSpace.hh"
65 #include "MachineInfo.hh"
66 
67 #include "llvm/Support/ErrorHandling.h"
68 
69 #include <iostream> // DEBUG
70 
71 
72 #ifdef TARGET64BIT
73 #define DEFAULT_TYPE MVT::i64
74 #define DEFAULT_IMM_INSTR TCE::MOVI64sa
75 #define DEFAULT_SIZE 8
76 #define DEFAULT_REG_CLASS TCE::R64IRegsRegClass
77 #else
78 #define DEFAULT_TYPE MVT::i32
79 #define DEFAULT_IMM_INSTR TCE::MOVI32ri
80 #define DEFAULT_SIZE 4
81 #define DEFAULT_REG_CLASS TCE::R32IRegsRegClass
82 #endif
83 
84 
85 
86 using namespace llvm;
87 
88 //===----------------------------------------------------------------------===//
89 // Calling Convention Implementation
90 //===----------------------------------------------------------------------===//
91 
92 #include "TCEGenCallingConv.inc"
93 
94 #include "ArgRegs.hh"
95 
96 SDValue
98  CallingConv::ID CallConv, bool isVarArg,
99  const SmallVectorImpl<ISD::OutputArg> &Outs,
100  const SmallVectorImpl<SDValue> &OutVals,
101  SDLOC_PARAM_TYPE dl, SelectionDAG &DAG) const
102 {
103 
104  // CCValAssign - represent the assignment of the return value to locations.
105  SmallVector<CCValAssign, 16> RVLocs;
106 
107  // CCState - Info about the registers and stack slot.
108  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
109  RVLocs, *DAG.getContext());
110 
111  // Analize return values.
112  CCInfo.AnalyzeReturn(Outs, RetCC_TCE);
113 
114  SmallVector<SDValue, 4> RetOps(1, Chain);
115 
116  SDValue Flag;
117 
118  // Copy the result values into the output registers.
119  for (unsigned i = 0; i != RVLocs.size(); ++i) {
120  CCValAssign &VA = RVLocs[i];
121  assert(VA.isRegLoc() && "Can only return in registers!");
122 
123  Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
124  OutVals[i], Flag);
125 
126  // Guarantee that all emitted copies are stuck together with flags.
127  Flag = Chain.getValue(1);
128  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
129  }
130 
131  RetOps[0] = Chain; // Update chain.
132 
133  // Add the flag if we have it.
134  if (Flag.getNode())
135  RetOps.push_back(Flag);
136 
137  return DAG.getNode(
138  TCEISD::RET_FLAG, dl, MVT::Other, ArrayRef<SDValue>(RetOps));
139 }
140 
141 /**
142  * Lowers formal arguments.
143  */
144 SDValue
146  SDValue Chain,
147  CallingConv::ID CallConv,
148  bool isVarArg,
149  const SmallVectorImpl<ISD::InputArg> &Ins,
150  SDLOC_PARAM_TYPE dl,
151  SelectionDAG &DAG,
152  SmallVectorImpl<SDValue> &InVals) const
153 {
154 
155  MachineFunction &MF = DAG.getMachineFunction();
156  auto& frameInfo = MF.getFrameInfo();
157  MachineRegisterInfo &RegInfo = MF.getRegInfo();
158 
159  // Assign locations to all of the incoming arguments.
160  SmallVector<CCValAssign, 16> ArgLocs;
161  CCState CCInfo(
162  CallConv, isVarArg, DAG.getMachineFunction(),
163  ArgLocs, *DAG.getContext());
164 
165  CCInfo.AnalyzeFormalArguments(Ins, CC_TCE);
166 
167  const unsigned *CurArgReg = ArgRegs, *ArgRegEnd = ArgRegs + argRegCount;
168  const unsigned maxMemAlignment = isVarArg ? 4 : tm_.stackAlignment();
169  unsigned ArgOffset = 0;
170 
171  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
172  SDValue ArgValue;
173  CCValAssign &VA = ArgLocs[i];
174  // FIXME: We ignore the register assignments of AnalyzeFormalArguments
175  // because it doesn't know how to split a double into two i32 registers.
176  EVT ObjectVT = VA.getValVT();
177  MVT sType = ObjectVT.getSimpleVT().SimpleTy;
178 
179  if (sType == MVT::i1 || sType == MVT::i8 || sType == MVT::i16 ||
180 #ifdef TARGET64BIT
181  sType == MVT::i64 ||
182 #endif
183  sType == MVT::i32) {
184  // There may be a bug that marked as not used if varargs
185  if (!Ins[i].Used) {
186  if (CurArgReg < ArgRegEnd) {
187  ++CurArgReg;
188  }
189 
190  InVals.push_back(DAG.getUNDEF(ObjectVT));
191  } else if (CurArgReg < ArgRegEnd && !isVarArg) {
192  unsigned VReg = RegInfo.createVirtualRegister(
194  MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
195  SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, DEFAULT_TYPE);
196  if (ObjectVT != DEFAULT_TYPE) {
197  unsigned AssertOp = ISD::AssertSext;
198  Arg = DAG.getNode(
199  AssertOp, dl, DEFAULT_TYPE, Arg,
200  DAG.getValueType(ObjectVT));
201  Arg = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Arg);
202  }
203  InVals.push_back(Arg);
204 
205  } else {
206  int FrameIdx = frameInfo.CreateFixedObject(
207  DEFAULT_SIZE, ArgOffset, /*immutable=*/true);
208 
209  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, DEFAULT_TYPE);
210  SDValue Load;
211  if (ObjectVT == DEFAULT_TYPE) {
212  Load = DAG.getLoad(
213  DEFAULT_TYPE, dl, Chain, FIPtr, MachinePointerInfo());
214  } else {
215  ISD::LoadExtType LoadOp = ISD::SEXTLOAD;
216 
217 #ifndef LITTLE_ENDIAN_TARGET // big endian extload hack starts
218 
219  // TODO: WHAT IS THIS??
220  // TCE IS NO LONGER ALWAYS BIG-ENDIAN!
221  // TCE is big endian, add an offset based on the ObjectVT.
222  unsigned Offset = DEFAULT_SIZE - std::max(
223  1UL, ObjectVT.getSizeInBits().getFixedSize()/8);
224  FIPtr = DAG.getNode(
225  ISD::ADD, dl, DEFAULT_TYPE, FIPtr,
226  DAG.getConstant(Offset, dl, DEFAULT_TYPE));
227 
228 #endif // big endian hack ends
229 
230  Load = DAG.getExtLoad(
231  LoadOp, dl, DEFAULT_TYPE, Chain, FIPtr,
232  MachinePointerInfo(), ObjectVT);
233  Load = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Load);
234  }
235  InVals.push_back(Load);
236  }
237  } else if (sType == MVT::f16) {
238  if (!Ins[i].Used) { // Argument is dead.
239  if (CurArgReg < ArgRegEnd) {
240  ++CurArgReg;
241  }
242  InVals.push_back(DAG.getUNDEF(ObjectVT));
243  } else if (CurArgReg < ArgRegEnd && !isVarArg) {
244  unsigned VReg = RegInfo.createVirtualRegister(
245  &TCE::HFPRegsRegClass);
246  MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
247  SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f16);
248  InVals.push_back(Arg);
249  } else {
250  int FrameIdx = frameInfo.CreateFixedObject(
251  DEFAULT_SIZE, ArgOffset, /*immutable=*/true);
252  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, DEFAULT_TYPE);
253  SDValue Load = DAG.getLoad(
254  MVT::f16, dl, Chain, FIPtr, MachinePointerInfo());
255  InVals.push_back(Load);
256  }
257  } else if (sType == MVT::f32 || sType == MVT::f64) {
258  if (!Ins[i].Used) { // Argument is dead.
259  if (CurArgReg < ArgRegEnd) {
260  ++CurArgReg;
261  }
262  InVals.push_back(DAG.getUNDEF(ObjectVT));
263  } else if (CurArgReg < ArgRegEnd && !isVarArg) { // reg argument
264  auto regClass = sType == MVT::f32 ?
265  &TCE::FPRegsRegClass:
266  &TCE::R64DFPRegsRegClass;
267  unsigned VReg = RegInfo.createVirtualRegister(regClass);
268  MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
269  SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, sType);
270  InVals.push_back(Arg);
271  } else { // argument in stack.
272  int FrameIdx = frameInfo.CreateFixedObject(
273  DEFAULT_SIZE, ArgOffset, /*immutable=*/true);
274  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, DEFAULT_TYPE);
275  SDValue Load = DAG.getLoad(
276  sType, dl, Chain, FIPtr, MachinePointerInfo());
277  InVals.push_back(Load);
278  }
279  } else if (sType.isVector()) {
280  if (!Ins[i].Used) {
281  InVals.push_back(DAG.getUNDEF(ObjectVT));
282  } else {
283  int FrameIdx = MF.getFrameInfo().CreateFixedObject(
284  sType.getStoreSize(), ArgOffset, true);
285  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, DEFAULT_TYPE);
286  SDValue Load = DAG.getLoad(
287  sType, dl, Chain, FIPtr, MachinePointerInfo());
288  InVals.push_back(Load);
289  }
290  } else {
291  std::cerr << "Unhandled argument type: "
292  << ObjectVT.getEVTString() << std::endl;
293  std::cerr << "sType size in bits: " << sType.getSizeInBits() << std::endl;
294  std::cerr << "is a vector? " << sType.isVector() << std::endl;
295  assert(false);
296  }
297 
298  unsigned argumentByteSize = sType.getStoreSize();
299 
300  // Align parameter to stack correctly.
301  if (argumentByteSize <= maxMemAlignment) {
302  ArgOffset += maxMemAlignment;
303  } else {
304  unsigned alignBytes = maxMemAlignment - 1;
305  ArgOffset += (argumentByteSize + alignBytes) & (~alignBytes);
306  }
307  }
308 
309  // inspired from ARM
310  if (isVarArg) {
311  /// @todo This probably doesn't work with vector arguments currently.
312  // This will point to the next argument passed via stack.
313 
314  VarArgsFrameOffset = frameInfo.CreateFixedObject(
315  DEFAULT_SIZE, ArgOffset, /*immutable=*/true);
316  }
317 
318  return Chain;
319 }
320 
321 
322 SDValue
323 TCETargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
324  SmallVectorImpl<SDValue> &InVals) const {
325 
326  SelectionDAG &DAG = CLI.DAG;
327  SDLoc &dl = CLI.DL;
328  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
329  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
330  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
331  SDValue Chain = CLI.Chain;
332  SDValue Callee = CLI.Callee;
333  bool &isTailCall = CLI.IsTailCall;
334  CallingConv::ID CallConv = CLI.CallConv;
335  bool isVarArg = CLI.IsVarArg;
336 
337  // we do not yet support tail call optimization.
338  isTailCall = false;
339 
340  (void)CC_TCE;
341 
342  const unsigned maxMemAlignment = isVarArg? 4 : tm_.stackAlignment();
343  int regParams = 0;
344  unsigned ArgsSize = 0;
345 
346  // Count the size of the outgoing arguments.
347  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
348  EVT ObjectVT = Outs[i].VT;
349  MVT sType = Outs[i].VT.SimpleTy;
350 #ifndef TARGET64BIT
351  if (sType == MVT::i1 || sType == MVT::i8 || sType == MVT::i16 ||
352  sType == MVT::i32 || sType == MVT::f16 || sType == MVT::f32) {
353  if (regParams < argRegCount) {
354  regParams++;
355  }
356  } else if (sType == MVT::i64 || sType == MVT::f64) {
357  // Nothing to do.
358  } else if (sType.isVector()) {
359  // Nothing to do.
360  } else {
361  std::cerr << "Unknown argument type: "
362  << ObjectVT.getEVTString() << std::endl;
363  assert(false);
364  }
365 #else
366  if (sType == MVT::i1 || sType == MVT::i8 || sType == MVT::i16 ||
367  sType == MVT::i32 || sType == MVT::i64 || sType == MVT::f16 ||
368  sType == MVT::f32 || sType == MVT::f64) {
369  if (regParams < argRegCount) {
370  regParams++;
371  }
372  } else if (sType.isVector()) {
373  // Nothing to do.
374  } else {
375  std::cerr << "Unknown argument type: "
376  << ObjectVT.getEVTString() << std::endl;
377  assert(false);
378  }
379 #endif
380 
381  unsigned argumentByteSize = sType.getStoreSize();
382 
383  // Align parameter to stack correctly.
384  if (argumentByteSize <= maxMemAlignment) {
385  ArgsSize += maxMemAlignment;
386  } else {
387  unsigned alignBytes = maxMemAlignment - 1;
388  ArgsSize += (argumentByteSize + alignBytes) & (~alignBytes);
389  }
390  }
391  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
392  SmallVector<SDValue, 8> MemOpChains;
393 
394  SmallVector<std::pair<unsigned, SDValue>, argRegCount> RegsToPass;
395 
396  unsigned ArgOffset = 0;
397 
398  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
399  SDValue Val = OutVals[i];
400  EVT ObjectVT = Val.getValueType();
401  MVT sType = ObjectVT.getSimpleVT().SimpleTy;
402  SDValue ValToStore(0, 0);
403 
404 #ifndef TARGET64BIT
405  if (sType == MVT::i1 || sType == MVT::i8 || sType == MVT::i16 ||
406  sType == MVT::i32 || sType == MVT::f32 || sType == MVT::f16) {
407  if (RegsToPass.size() >= argRegCount || isVarArg) {
408  ValToStore = Val;
409  }
410  if (RegsToPass.size() < argRegCount) {
411  RegsToPass.push_back(
412  std::make_pair(ArgRegs[RegsToPass.size()], Val));
413  }
414  } else if (sType.isVector()) {
415  ValToStore = Val;
416  } else {
417  std::cerr << "Unknown argument type: "
418  << ObjectVT.getEVTString() << std::endl;
419  assert(false);
420  }
421 #else // is 64-bit
422  if (sType == MVT::i1 || sType == MVT::i8 || sType == MVT::i16 ||
423  sType == MVT::i32 || sType == MVT::i64 || sType == MVT::f32 ||
424  sType == MVT::f64) {
425  if (RegsToPass.size() >= argRegCount || isVarArg) {
426  ValToStore = Val;
427  }
428  if (RegsToPass.size() < argRegCount) {
429  RegsToPass.push_back(
430  std::make_pair(ArgRegs[RegsToPass.size()], Val));
431  }
432  } else if (sType.isVector()) {
433  ValToStore = Val;
434  } else {
435  std::cerr << "Unknown argument type: "
436  << ObjectVT.getEVTString() << std::endl;
437  assert(false);
438  }
439 #endif
440 
441  if (ValToStore.getNode()) {
442  SDValue StackPtr = DAG.getCopyFromReg(
443  Chain, dl, TCE::SP, getPointerTy(
444  getTargetMachine().createDataLayout(), 0));
445  SDValue PtrOff = DAG.getConstant(ArgOffset, dl, DEFAULT_TYPE);
446  PtrOff = DAG.getNode(ISD::ADD, dl, DEFAULT_TYPE, StackPtr, PtrOff);
447 
448  MemOpChains.push_back(DAG.getStore(Chain, dl, ValToStore,
449  PtrOff, MachinePointerInfo()));
450  }
451 
452  unsigned argumentByteSize = sType.getStoreSize();
453 
454  // Align parameter to stack correctly.
455  if (argumentByteSize <= maxMemAlignment) {
456  ArgOffset += maxMemAlignment;
457  } else {
458  unsigned alignBytes = maxMemAlignment - 1;
459  ArgOffset += (argumentByteSize + alignBytes) & (~alignBytes);
460  }
461  }
462 
463  // Emit all stores, make sure the occur before any copies into physregs.
464  if (!MemOpChains.empty()) {
465  Chain = DAG.getNode(
466  ISD::TokenFactor, dl, MVT::Other, ArrayRef<SDValue>(MemOpChains));
467  }
468 
469  // Build a sequence of copy-to-reg nodes chained together with token
470  // chain and flag operands which copy the outgoing args into registers.
471  // The InFlag in necessary since all emited instructions must be
472  // stuck together.
473  SDValue InFlag;
474 
475  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
476  unsigned Reg = RegsToPass[i].first;
477  Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
478  InFlag = Chain.getValue(1);
479  }
480 
481  // If the callee is a GlobalAddress node (quite common, every direct call is)
482  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
483  // Likewise ExternalSymbol -> TargetExternalSymbol.
484  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
485  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, DEFAULT_TYPE);
486  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
487  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), DEFAULT_TYPE);
488  std::vector<EVT> NodeTys;
489  NodeTys.push_back(MVT::Other); // Returns a chain
490  NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use.
491  SDValue Ops[] = { Chain, Callee, InFlag };
492 
493  Chain = DAG.getNode(
494  TCEISD::CALL, dl, ArrayRef<EVT>(NodeTys),
495  ArrayRef<SDValue>(Ops, InFlag.getNode() ? 3 : 2));
496 
497  InFlag = Chain.getValue(1);
498 
499  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
500  DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
501  InFlag = Chain.getValue(1);
502 
503  // Assign locations to each value returned by this call.
504  SmallVector<CCValAssign, 16> RVLocs;
505  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(),
506  RVLocs, *DAG.getContext());
507 
508  RVInfo.AnalyzeCallResult(Ins, RetCC_TCE);
509 
510  // Copy all of the result registers out of their specified physreg. (only one rv reg)
511  for (unsigned i = 0; i != RVLocs.size(); ++i) {
512  unsigned Reg = RVLocs[i].getLocReg();
513 
514  Chain = DAG.getCopyFromReg(Chain, dl, Reg,
515  RVLocs[i].getValVT(), InFlag).getValue(1);
516  InFlag = Chain.getValue(2);
517  InVals.push_back(Chain.getValue(0));
518  }
519 
520  return Chain;
521 }
522 
523 /**
524  * The Constructor.
525  *
526  * Initializes the target lowering.
527  */
529  TargetMachine &TM, const TCESubtarget &subt)
530  : TargetLowering(TM), tm_(static_cast<TCETargetMachine &>(TM)) {
531  LLVMTCECmdLineOptions* opts = dynamic_cast<LLVMTCECmdLineOptions*>(
533 
534  if (opts != NULL && opts->conservativePreRAScheduler()) {
535  setSchedulingPreference(llvm::Sched::RegPressure);
536  }
537 
539  if (hasI1RC_)
540  addRegisterClass(MVT::i1, &TCE::R1RegsRegClass);
541 
542 #ifdef TARGET64BIT
543  addRegisterClass(MVT::i64, &TCE::R64IRegsRegClass);
544  addRegisterClass(MVT::f64, &TCE::R64DFPRegsRegClass);
545 #else
546  addRegisterClass(MVT::i32, &TCE::R32IRegsRegClass);
547 #endif
548  addRegisterClass(MVT::f32, &TCE::FPRegsRegClass);
549  addRegisterClass(MVT::f16, &TCE::HFPRegsRegClass);
550 
551  setOperationAction(ISD::UINT_TO_FP, MVT::i1 , Promote);
552  setOperationAction(ISD::UINT_TO_FP, MVT::i8 , Promote);
553  setOperationAction(ISD::UINT_TO_FP, MVT::i16 , Promote);
554 
555  setOperationAction(ISD::SINT_TO_FP, MVT::i1 , Promote);
556  setOperationAction(ISD::SINT_TO_FP, MVT::i8 , Promote);
557  setOperationAction(ISD::SINT_TO_FP, MVT::i16 , Promote);
558 
559  setOperationAction(ISD::FP_TO_UINT, MVT::i1 , Promote);
560  setOperationAction(ISD::FP_TO_UINT, MVT::i8 , Promote);
561  setOperationAction(ISD::FP_TO_UINT, MVT::i16 , Promote);
562 
563  setOperationAction(ISD::FP_TO_SINT, MVT::i1 , Promote);
564  setOperationAction(ISD::FP_TO_SINT, MVT::i8 , Promote);
565  setOperationAction(ISD::FP_TO_SINT, MVT::i16 , Promote);
566 
567  setOperationAction(ISD::GlobalAddress, DEFAULT_TYPE, Custom);
568  setOperationAction(ISD::BlockAddress, DEFAULT_TYPE, Custom);
569  setOperationAction(ISD::ConstantPool , DEFAULT_TYPE, Custom);
570 
571  setOperationAction(ISD::TRAP, MVT::Other, Custom);
572 
573 // TODO: define TCE instruction for leading/trailing zero count
574  setOperationAction(ISD::CTLZ, DEFAULT_TYPE, Expand);
575  setOperationAction(ISD::CTTZ, DEFAULT_TYPE, Expand);
576  setOperationAction(ISD::CTPOP, DEFAULT_TYPE, Expand);
577  // Using 'old way' MVT::Other to cover all value types is illegal now.
578  setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
579  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
580  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
581  setOperationAction(ISD::SELECT_CC, MVT::f80, Expand);
582  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
583  setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
584  setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
585  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
586  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
587 
588 
589  // not needed when we uses xor for boolean comparison
590 // setOperationAction(ISD::SETCC, MVT::i1, Promote);
591 
592  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
593 
594  // Expand indirect branches.
595  setOperationAction(ISD::BRIND, MVT::Other, Expand);
596  // Expand jumptable branches.
597  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
598  // Expand conditional branches.
599 
600  // only port-guarded jumps..
603  std::cerr << "Only port guarded jumps supported, not expanding bc_cc" << std::endl;
604 
605  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
606  setOperationAction(ISD::BRCOND, MVT::i1, Expand);
607  setOperationAction(ISD::BRCOND, MVT::i32, Expand);
608  setOperationAction(ISD::BRCOND, MVT::f16, Expand);
609  setOperationAction(ISD::BRCOND, MVT::f32, Expand);
610  setOperationAction(ISD::BRCOND, MVT::i64, Expand);
611  } else {
612  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
613  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
614  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
615  setOperationAction(ISD::BR_CC, MVT::f16, Expand);
616  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
617  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
618  }
619 
620  // Hardware loop ops
621  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
622  if (!opts->disableHWLoops()) {
623  setTargetDAGCombine(ISD::BRCOND);
624  }
625 
626 #ifdef TARGET64BIT
627  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
628 #endif
629  setOperationAction(ISD::MULHU, MVT::i32, Expand);
630  setOperationAction(ISD::MULHS, MVT::i32, Expand);
631 
632 #ifdef TARGET64BIT
633  setOperationAction(ISD::MULHU, MVT::i64, Expand);
634  setOperationAction(ISD::MULHS, MVT::i64, Expand);
635 #endif
636  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
637  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
638  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
639 
640  setOperationAction(ISD::VASTART , MVT::Other, Custom);
641 
642  setOperationAction(ISD::VAARG , MVT::Other, Expand);
643  setOperationAction(ISD::VACOPY , MVT::Other, Expand);
644  setOperationAction(ISD::VAEND , MVT::Other, Expand);
645  setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
646  setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
647 
648  setOperationAction(ISD::DYNAMIC_STACKALLOC, DEFAULT_TYPE, Expand);
649 
650  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
651  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
652 
653  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
654 
655  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
656  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
657  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
658  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
659 
660  setOperationAction(ISD::BSWAP, DEFAULT_TYPE, Expand);
661 
662  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
663  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
664 
665 #ifdef TARGET64BIT
666  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
667  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
668  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
669 #endif
670 
671  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
672  // 3.7 requires the types as target type second parameter,
673  // mem type thid parameter
674  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
675  setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
676  setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
677  setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);
678  setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Expand);
679  setLoadExtAction(ISD::EXTLOAD, MVT::v32f32, MVT::v32f16, Expand);
680 
681 #ifdef TARGET64BIT
682  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
683 #endif
684 
685 #if LLVM_HAS_CUSTOM_VECTOR_EXTENSION == 2
686  setLoadExtAction(ISD::EXTLOAD, MVT::v64f32, MVT::v64f16, Expand);
687  setLoadExtAction(ISD::EXTLOAD, MVT::v128f32, MVT::v128f16, Expand);
688 #endif
689 
690  if (!tm_.has8bitLoads()) {
691  if (Application::verboseLevel() > 0) {
692  std::cout << "No 8-bit loads in the processor. "
693  << "Emulating 8-bit loads with wider loads. "
694  << "This may be very slow if the program performs "
695  << "lots of 8-bit loads." << std::endl;
696  }
697 
698 #ifdef TARGET64BIT
699  setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i8, Custom);
700  setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i8, Custom);
701  setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i8, Custom);
702  setOperationAction(ISD::LOAD, MVT::i8, Custom);
703  setOperationAction(ISD::LOAD, MVT::i1, Custom);
704 
705  setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i1, Custom);
706  setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i1, Custom);
707  setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i1, Custom);
708 #else
709  setLoadExtAction(ISD::EXTLOAD, MVT::i32, MVT::i8, Custom);
710  setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i8, Custom);
711  setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i8, Custom);
712  setOperationAction(ISD::LOAD, MVT::i8, Custom);
713  setOperationAction(ISD::LOAD, MVT::i1, Custom);
714 
715  setLoadExtAction(ISD::EXTLOAD, MVT::i32, MVT::i1, Custom);
716  setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i1, Custom);
717  setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i1, Custom);
718 #endif
719  }
720 
721  if (!tm_.has16bitLoads()) {
722  if (Application::verboseLevel() > 0) {
723  std::cout << "No 16-bit loads in the processor. "
724  << "Emulating 16-bit loads with wider loads. "
725  << "This may be very slow if the program performs "
726  << "lots of 16-bit loads." << std::endl;
727  }
728 #ifdef TARGET64BIT
729  setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i16, Custom);
730  setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i16, Custom);
731  setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i16, Custom);
732  setOperationAction(ISD::LOAD, MVT::i16, Custom);
733 #else
734  setLoadExtAction(ISD::EXTLOAD, MVT::i32, MVT::i16, Custom);
735  setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i16, Custom);
736  setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i16, Custom);
737  setOperationAction(ISD::LOAD, MVT::i16, Custom);
738 #endif
739  }
740 
741  setOperationAction(ISD::ADDE, MVT::i32, Expand);
742  setOperationAction(ISD::ADDC, MVT::i32, Expand);
743  setOperationAction(ISD::ADDE, MVT::i16, Expand);
744  setOperationAction(ISD::ADDC, MVT::i16, Expand);
745  setOperationAction(ISD::ADDE, MVT::i8, Expand);
746  setOperationAction(ISD::ADDC, MVT::i8, Expand);
747 #ifdef TARGET64BIT
748  setOperationAction(ISD::Constant, MVT::i64, Custom);
749 #else
750  setOperationAction(ISD::Constant, MVT::i32, Custom);
751 #endif
752 
753  setStackPointerRegisterToSaveRestore(TCE::SP);
754 
755  // Set missing operations that can be emulated with emulation function
756  // or LLVM built-in emulation pattern to be expanded.
757  const std::set<std::pair<unsigned, llvm::MVT::SimpleValueType> >*
758  missingOps = tm_.missingOperations();
759 
760  std::set<std::pair<unsigned, llvm::MVT::SimpleValueType> >::const_iterator
761  iter = missingOps->begin();
762 
763  if (Application::verboseLevel() > 0) {
764  Application::logStream() << "Missing ops: ";
765  }
766 
767  while (iter != missingOps->end()) {
768  unsigned nodetype = (*iter).first;
769  llvm::MVT::SimpleValueType valuetype = (*iter).second;
770  if (Application::verboseLevel() > 0) {
771  switch (nodetype) {
772  case ISD::SDIV: std::cerr << "SDIV,"; break;
773  case ISD::UDIV: std::cerr << "UDIV,"; break;
774  case ISD::SREM: std::cerr << "SREM,"; break;
775  case ISD::UREM: std::cerr << "UREM,"; break;
776  case ISD::ROTL: std::cerr << "ROTL,"; break;
777  case ISD::ROTR: std::cerr << "ROTR,"; break;
778  case ISD::MUL: std::cerr << "MUL,"; break;
779  case ISD::SIGN_EXTEND_INREG:
780  if (valuetype == MVT::i8) std::cerr << "SXQW,";
781  if (valuetype == MVT::i16) std::cerr << "SXHW,";
782  break;
783  default: std::cerr << nodetype << ", "; break;
784  };
785  }
786  setOperationAction(nodetype, valuetype, Expand);
787  iter++;
788  }
789 
790  const std::set<std::pair<unsigned, llvm::MVT::SimpleValueType> >*
791  promotedOps = tm_.promotedOperations();
792 
793  iter = promotedOps->begin();
794  while (iter != promotedOps->end()) {
795  unsigned nodetype = (*iter).first;
796  llvm::MVT::SimpleValueType valuetype = (*iter).second;
797  llvm::EVT evt(valuetype);
798  setOperationAction(nodetype, valuetype, Promote);
799  iter++;
800  }
801 
802  if (Application::verboseLevel() > 0) {
803  std::cerr << std::endl;
804  }
805 
806  auto customLegalizedOps = tm_.customLegalizedOperations();
807  for (auto i : *customLegalizedOps) {
808  unsigned nodetype = i.first;
809  llvm::MVT::SimpleValueType valuetype = i.second;
810  llvm::EVT evt(valuetype);
811  setOperationAction(nodetype, valuetype, Custom);
812  }
813 
814  setJumpIsExpensive(true);
815 
816  //setShouldFoldAtomicFences(true);
817 
818  PredictableSelectIsExpensive = false;
819 
820  // Determine which of global addresses by address space id should be //
821  // loaded from constant pool due to limited immediate support. //
822  // Reverse for default address space.
823  loadGAFromConstantPool_[0] = false;
824  for (const auto& as : tm_.ttaMachine().addressSpaceNavigator()) {
825  if (as->numericalIds().empty()) {
826  // No IDs specified, assume default address space ID (0)
827  if (as->end() > tm_.largestImmValue()) {
828  if (Application::verboseLevel() > 0) {
829  std::cerr << "Global addresses by "
830  << "address space id of 0"
831  << " (implicitly specified by AS: " << as->name()
832  << ") will be stored in constant pool."
833  << std::endl;
834  }
835  loadGAFromConstantPool_[0] = true;
836  } else {
837  loadGAFromConstantPool_[0] |= false;
838  }
839  continue;
840  }
841 
842  for (unsigned id : as->numericalIds()) {
843  if (as->end() > tm_.largestImmValue()) {
844  if (Application::verboseLevel() > 0) {
845  std::cerr << "Global addresses belonging to "
846  << "address space id of " << id
847  << " (specified by AS: " << as->name()
848  << ") will be stored in constant pool."
849  << std::endl;
850  }
851  loadGAFromConstantPool_[id] = true;
852  } else {
853  loadGAFromConstantPool_[id] |= false;
854  }
855  }
856  }
857 
858  setBooleanContents(ZeroOrOneBooleanContent);
859  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
860 
863  computeRegisterProperties(subt.getRegisterInfo());
864 }
865 
866 /**
867  * Returns target node opcode names for debugging purposes.
868  *
869  * @param opcode Opcode to convert to string.
870  * @return Opcode name.
871  */
872 const char*
873 TCETargetLowering::getTargetNodeName(unsigned opcode) const {
874  switch (opcode) {
875  default: return NULL;
876  case TCEISD::CALL: return "TCEISD::CALL";
877  case TCEISD::RET_FLAG: return "TCEISD::RET_FLAG";
878  case TCEISD::GLOBAL_ADDR: return "TCEISD::GLOBAL_ADDR";
879  case TCEISD::CONST_POOL: return "TCEISD::CONST_POOL";
880  case TCEISD::FTOI: return "TCEISD::FTOI";
881  case TCEISD::ITOF: return "TCEISD::ITOF";
882  case TCEISD::SELECT_I1: return "TCEISD::SELECT_I1";
883  case TCEISD::SELECT_I8: return "TCEISD::SELECT_I8";
884  case TCEISD::SELECT_I16: return "TCEISD::SELECT_I16";
885  case TCEISD::SELECT_I32: return "TCEISD::SELECT_I32";
886  case TCEISD::SELECT_I64: return "TCEISD::SELECT_I64";
887  case TCEISD::SELECT_F16: return "TCEISD::SELECT_F16";
888  case TCEISD::SELECT_F32: return "TCEISD::SELECT_F32";
889  case TCEISD::SELECT_F64: return "TCEISD::SELECT_F64";
890  }
891 }
892 
893 SDValue TCETargetLowering::LowerTRAP(SDValue Op, SelectionDAG &DAG) const {
894  SDLoc dl(Op);
895 
896  TargetLowering::ArgListTy Args;
897 
898  TargetLowering::CallLoweringInfo CLI(DAG);
899  CLI.setDebugLoc(dl);
900  CLI.setChain(Op->getOperand(0));
901  CLI.setCallee(
902  CallingConv::C,
903  Type::getVoidTy(*DAG.getContext()),
904  DAG.getExternalSymbol("_exit",
905  getPointerTy(getTargetMachine().createDataLayout(), 0)),
906  std::move(Args));
907  CLI.setInRegister(false);
908  CLI.setNoReturn(true);
909  CLI.setVarArg(false);
910  CLI.setTailCall(false);
911  CLI.setDiscardResult(false);
912  CLI.setSExtResult(false);
913  CLI.setZExtResult(false);
914 
915  std::pair<SDValue, SDValue> CallResult =
916  LowerCallTo(CLI);
917  return CallResult.second;
918 
919 }
920 
921 
922 SDValue
923 TCETargetLowering::LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const {
924  const GlobalAddressSDNode* gn = cast<GlobalAddressSDNode>(Op);
925  const GlobalValue* gv = gn->getGlobal();
926  // FIXME there isn't really any debug info here
927  SDLoc dl(Op);
928 
929 #if 0
930  std::cerr << "lowering GA: AS = " << gn->getAddressSpace() << ", ";
931  gv->getValueType()->dump();
932 #endif
933 
934  if (shouldLoadFromConstantPool(gn->getAddressSpace())) {
935  // Immediate support for the address space is limited. Therefore,
936  // the address must be loaded from constant pool.
937  auto vt = getPointerTy(DAG.getDataLayout(), gn->getAddressSpace());
938  SDValue cpIdx = DAG.getConstantPool(
939  gv, getPointerTy(DAG.getDataLayout()));
940  llvm::Align Alignment = cast<ConstantPoolSDNode>(cpIdx)->getAlign();
941  SDValue result = DAG.getLoad(vt, dl, DAG.getEntryNode(), cpIdx,
942  MachinePointerInfo::getConstantPool(DAG.getMachineFunction())
943  );
944 
945  if (Application::verboseLevel() > 0) {
946  std::cerr << "Expanded Global Value to a load from "
947  << "the constant pool." << std::endl;
948  }
949  return result;
950  }
951  SDValue tga = DAG.getTargetGlobalAddress(gv, dl, DEFAULT_TYPE);
952  return DAG.getNode(TCEISD::GLOBAL_ADDR, SDLoc(Op), DEFAULT_TYPE, tga);
953 }
954 
955 SDValue
956 TCETargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
957  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
958 
959  SDValue BA_SD = DAG.getTargetBlockAddress(BA, DEFAULT_TYPE);
960  SDLoc dl(Op);
961  return DAG.getNode(
962  TCEISD::BLOCK_ADDR, dl,
963  getPointerTy(getTargetMachine().createDataLayout(), 0), BA_SD);
964 }
965 
966 static SDValue LowerCONSTANTPOOL(SDValue Op, SelectionDAG &DAG) {
967  // TODO: Check this.
968  llvm::MVT ptrVT = Op.getValueType().getSimpleVT();
969  ConstantPoolSDNode* cp = cast<ConstantPoolSDNode>(Op);
970  SDValue res;
971  if (cp->isMachineConstantPoolEntry()) {
972  res = DAG.getTargetConstantPool(
973  cp->getMachineCPVal(), ptrVT,
974  cp->getAlign());
975  } else {
976  res = DAG.getTargetConstantPool(
977  cp->getConstVal(), ptrVT,
978  cp->getAlign());
979 
980  }
981  return DAG.getNode(TCEISD::CONST_POOL, SDLoc(Op), DEFAULT_TYPE, res);
982 }
983 
984 SDValue
985 TCETargetLowering::LowerConstant(SDValue Op, SelectionDAG &DAG) const {
986  ConstantSDNode* cn = cast<ConstantSDNode>(Op.getNode());
987  assert(cn);
988 
989  if (canEncodeImmediate(*cn)) {
990  return Op;
991  } else {
992  // The constant is not supported immediate, return empty SDValue, so
993  // it gets converted to a load from a constant pool.
994  if (Application::verboseLevel() > 0) {
995  std::cerr << "Expand constant of " << cn->getSExtValue();
996  std::cerr << " to a load from the constant pool." << std::endl;
997  }
998  // Since LLVM 3.8 LLVM's DAG Legalization does the expansion from
999  // constant to constant pool load.
1000  return SDValue(nullptr, 0);
1001  }
1002 }
1003 
1005 SDValue Op, MVT newElementVT, int elemCount, SelectionDAG &DAG) const {
1006 
1007  BuildVectorSDNode* node = cast<BuildVectorSDNode>(Op);
1008  MVT mvt = Op.getSimpleValueType();
1009  int laneWidth = newElementVT.getSizeInBits();
1010 
1011  std::vector<SDValue> packedConstants(elemCount/laneWidth);
1012  for (int i = 0; i < elemCount; i+=laneWidth) {
1013  unsigned int packedVal = 0;
1014  for (int j = 0; j < laneWidth; j++) {
1015  const SDValue& operand = node->getOperand(i+j);
1016  SDNode* opdNode = operand.getNode();
1017  if (isa<ConstantSDNode>(opdNode)) {
1018  ConstantSDNode* cn = cast<ConstantSDNode>(opdNode);
1019  if (cn->isOne()) {
1020  packedVal += (1<< j);
1021  }
1022  }
1023  }
1024  packedConstants[i/laneWidth] = DAG.getConstant(packedVal, Op, newElementVT);
1025  }
1026  EVT wvt = EVT::getVectorVT(*DAG.getContext(), newElementVT, elemCount/laneWidth);
1027  SDValue intVectorBuild = DAG.getNode(ISD::BUILD_VECTOR, Op, wvt, packedConstants);
1028  SDValue retValue = DAG.getNode(ISD::BITCAST, Op, mvt, intVectorBuild);
1029  return retValue;
1030 }
1031 
1032 SDValue
1033 TCETargetLowering::LowerBuildVector(SDValue Op, SelectionDAG &DAG) const {
1034 
1035  MVT elemVT = Op.getSimpleValueType().getScalarType();
1036  BuildVectorSDNode* node = cast<BuildVectorSDNode>(Op);
1037  int elemCount = node->getNumOperands();
1038 
1039  if (isConstantOrUndefBuild(*node)) {
1040  if (!isBroadcast(node)) {
1041  // Convert boolean vector into wider vector.
1042  // Use int here.
1043 
1044  auto vt = Op.getValueType();
1045  bool scalarizedPack = false;
1046  if (vt.isVector() && vt.getSizeInBits() == 32) {
1047  unsigned int packedVal = 0;
1048  unsigned int laneW = vt.getScalarSizeInBits();
1049  for (int i = 0;
1050  i < vt.getVectorElementCount().getKnownMinValue(); i++) {
1051  auto oprd = node->getOperand(i);
1052  ConstantSDNode* cn = cast<ConstantSDNode>(oprd);
1053  unsigned int val = cn->getZExtValue();
1054  val = val & (~0u >> (32 - laneW));
1055  packedVal |= (val << (laneW*i));
1056  }
1057  if (tm_.canEncodeAsMOVI(MVT::i32, packedVal)) {
1058  auto packedNode =
1059  DAG.getConstant(packedVal, Op, MVT::i32);
1060  return DAG.getNode(ISD::BITCAST, Op, vt, packedNode);
1061  }
1062  }
1063 
1064  if (elemVT == MVT::i1) {
1065  if (elemCount > 31) {
1066  assert(elemCount % 32 == 0);
1067  int intElemCount = elemCount/32;
1068  TCEString wideOpName = "PACK32X"; wideOpName << intElemCount;
1069  if (tm_.hasOperation(wideOpName)) {
1071  Op, MVT::i32, elemCount, DAG);
1072  }
1073  }
1074 /* TODO: this does not work if u16 and i8 value types not legal.
1075  if (elemCount > 15 && elemCount < 4096) {
1076  assert(elemCount % 16 == 0);
1077  int shortElemCount = elemCount/16;
1078  TCEString wideOpName = "PACK16X"; wideOpName << shortElemCount;
1079  if (tm_.hasOperation(wideOpName)) {
1080  return LowerBuildBooleanVectorVector(
1081  Op, MVT::i16, elemCount, DAG);
1082  }
1083  }
1084  if (elemCount > 7 && elemCount < 2048) {
1085  assert(elemCount % 8 == 0);
1086  int charElemCount = elemCount/8;
1087  TCEString wideOpName = "PACK8X"; wideOpName << charElemCount;
1088  if (tm_.hasOperation(wideOpName)) {
1089  return LowerBuildBooleanVectorVector(
1090  Op, MVT::i8, elemCount, DAG);
1091  }
1092  }
1093 */
1094  if (elemCount > 255) {
1095  std::cerr << "Warning: Lowering Boolean vector build with"
1096  << " more than 255 elements. LLVM does not"
1097  << " support instructions with more than"
1098  << " 255 operands so this will probably fail."
1099  << " Add a pack instruction using wider lane"
1100  << " width, such as PACK32X" << (elemCount/32)
1101  << " into your architecture."
1102  << std::endl;
1103  }
1104  } else { // not boolean.
1105  // makes no sense to have zillion inputs to build_vector.
1106  // load from const pool instead.
1107  TCEString packName = "PACK";
1108  switch (elemVT.SimpleTy) {
1109  case MVT::i8: packName << "8"; break;
1110  case MVT::i16: packName << "16"; break;
1111  case MVT::i32: packName << "32"; break;
1112  default: std::cerr << elemVT.SimpleTy << ", "; break;
1113  }
1114  packName << "X" << elemCount;
1115  // pack op not found from the adf or too big
1116  if (elemCount > 4 || !tm_.hasOperation(packName)) {
1117  return SDValue(nullptr, 0);
1118  }
1119  }
1120  }
1121 
1122  if (canEncodeConstantOperands(*node)) {
1123  return Op;
1124  }
1125  }
1126 
1127  // TODO: Check if there is enough register for the build_vector needed by
1128  // LLVM's register allocator.
1129 
1130  // There is issue with build_vector to be selected as all-register-operand
1131  // version of PACK (i.e. PACKtrrrrrrrr). LLVM's register allocator tries
1132  // allocate as many i32 registers as there is register operands. For
1133  // example with PACK8X64, the allocator tries to reserve 64 i32 register(!)
1134  // and likely runs out of them.
1135 
1136  if (Application::verboseLevel() > 1) {
1137  std::cerr << "Expanding build_vector of "
1138  << Op->getValueType(0).getEVTString()
1139  << " = { ";
1140  for (unsigned i = 0; i < node->getNumOperands(); i++) {
1141  auto opdNode = node->getOperand(i).getNode();
1142  if (isa<ConstantSDNode>(opdNode)) {
1143  ConstantSDNode* cn = cast<ConstantSDNode>(opdNode);
1144  std::cerr << cn->getSExtValue() << " ";
1145  } else {
1146  std::cerr << "Reg ";
1147  }
1148  }
1149  std::cerr << "}" << std::endl;
1150  }
1151 
1152  // TODO: Expand to insert_vector_elt chain rather than to expansion done by
1153  // LLVM
1154 
1155  // Expand to a load from constant pool or to an in-stack fabrication.
1156  return SDValue(nullptr, 0);
1157 }
1158 
1159 SDValue
1160 TCETargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
1161 
1162  // ARM ripoff
1163 
1164  // vastart just stores the address of the VarArgsFrameIndex slot into the
1165  // memory location argument.
1166  SDLoc dl(Op);
1167  EVT PtrVT =
1168  DAG.getTargetLoweringInfo().getPointerTy(
1169  getTargetMachine().createDataLayout(), 0);
1170  SDValue FR = DAG.getFrameIndex(getVarArgsFrameOffset(), PtrVT);
1171  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1172  return DAG.getStore(
1173  Op.getOperand(0), dl, FR, Op.getOperand(1), MachinePointerInfo(SV));
1174 }
1175 
1176 /**
1177  * Returns the preferred result type of comparison operations.
1178  *
1179  * @param VT Result type of the comparison operation.
1180  * @return Preferred comparison result type.
1181  */
1182 EVT
1184  const DataLayout &DL, LLVMContext &context, llvm::EVT VT) const {
1185  if (VT.isVector()) {
1186  EVT resultVectorType = getSetCCResultVT(VT);
1187  if (resultVectorType != MVT::INVALID_SIMPLE_VALUE_TYPE) {
1188  return resultVectorType;
1189  }
1190  }
1191  if (!VT.isVector()) return hasI1RC_ ? llvm::MVT::i1 : llvm::MVT::i32;
1192  return VT.changeVectorElementTypeToInteger();
1193 }
1194 
#ifdef OLD_VECTOR_CODE
/**
 * Lowers a v4i32 LOAD by scalarizing it into four i32 loads whose
 * results are recombined with an INSERT_VECTOR_ELT chain.
 *
 * NOTE(review): dead code — compiled only when OLD_VECTOR_CODE is
 * defined; uses pre-3.x LLVM APIs (DebugLoc-based getLoad overloads).
 */
static
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) {
    EVT VT = Op.getValueType();
    DebugLoc dl = Op.getDebugLoc();
    SDValue Chain = Op.getOperand(0);

    // TODO: why is this here?
    if (VT == MVT::v4i32) {
        EVT ptrVT = Op.getOperand(1).getValueType();

        SDValue Ptr0, Ptr1, Ptr2, Ptr3;
        SDValue Imm0 = DAG.getConstant(0, ptrVT);
        SDValue Imm1 = DAG.getConstant(1, ptrVT);
        SDValue Imm2 = DAG.getConstant(2, ptrVT);
        SDValue Imm3 = DAG.getConstant(3, ptrVT);

        // NOTE(review): element pointers step by 1, not by 4 bytes —
        // presumably word-addressed memory; confirm against the ADF.
        Ptr0 = Op.getOperand(1);
        Ptr1 = DAG.getNode(ISD::ADD, dl, ptrVT,
                           Op.getOperand(1), Imm1);
        Ptr2 = DAG.getNode(ISD::ADD, dl, ptrVT,
                           Op.getOperand(1), Imm2);
        Ptr3 = DAG.getNode(ISD::ADD, dl, ptrVT,
                           Op.getOperand(1), Imm3);
        SDValue Elt0 = DAG.getLoad(
            MVT::i32, dl, Chain, Ptr0, MachinePointerInfo(), false, false, 0);
        SDValue Elt1 = DAG.getLoad(
            MVT::i32, dl, Chain, Ptr1, MachinePointerInfo(), false, false, 0);
        SDValue Elt2 = DAG.getLoad(
            MVT::i32, dl, Chain, Ptr2, MachinePointerInfo(), false, false, 0);
        SDValue Elt3 = DAG.getLoad(
            MVT::i32, dl, Chain, Ptr3, MachinePointerInfo(), false, false, 0);
//        SDValue Result = DAG.getTargetInsertSubreg(0, dl, MVT::v4i32,
//           DAG.getTargetInsertSubreg(1, dl, MVT::v4i32,
//              DAG.getTargetInsertSubreg(2, dl, MVT::v4i32,
//                 DAG.getTargetInsertSubreg(3, dl, MVT::v4i32,
//                    DAG.getUNDEF(MVT::v4i32),
//                    Elt3), Elt2), Elt1), Elt0);

//        SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
//                                     Elt0, Elt1, Elt2, Elt3);

        // Build the vector one element at a time, starting from undef.
        SDValue Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4i32,
           DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4i32,
              DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4i32,
                 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4i32,
                    DAG.getNode(ISD::UNDEF, dl, MVT::v4i32),
                    Elt0, Imm0),
                 Elt1, Imm1),
              Elt2, Imm2),
           Elt3, Imm3);

        // Merge the four load chains back into one.
        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                            Elt0.getValue(1), Elt1.getValue(1),
                            Elt2.getValue(1), Elt3.getValue(1));

        SDValue Ops[] = {Result, Chain};

        // Return both the vector value and the combined chain.
        return DAG.getMergeValues(Ops, 2, dl);
    }

    llvm_unreachable("Invalid LOAD to lower!");
}
#endif
1259 
1260 std::pair<int, TCEString> TCETargetLowering::getConstShiftNodeAndTCEOP(SDValue op) const {
1261  switch(op.getOpcode()) {
1262  case ISD::SRA:
1263  return std::make_pair(TCEISD::SRA_Const, TCEString("SHR"));
1264  case ISD::SRL:
1265  return std::make_pair(TCEISD::SRL_Const, TCEString("SHRU"));
1266  case ISD::SHL:
1267  return std::make_pair(TCEISD::SHL_Const, TCEString("SHL"));
1268  default:
1269  return std::make_pair(0, TCEString("unknown op"));
1270  }
1271 }
1272 
1273 SDValue
1274 TCETargetLowering::LowerShift(SDValue op, SelectionDAG& dag) const {
1275 
1276  auto shiftOpcodes = getConstShiftNodeAndTCEOP(op);
1277  int shiftOpcode = shiftOpcodes.first;
1278  assert(shiftOpcode && "Shift opcide not supported, should not be here");
1279 
1280  SDValue R = op.getOperand(0);
1281  SDValue Amt = op.getOperand(1);
1282  const DebugLoc& dl = op.getDebugLoc();
1283  std::set<unsigned long> supportedShifts;
1284 
1285 
1286  // find all the constant shifts
1287  for (int i = 1; i < 32; i++) {
1288  TCEString opName = shiftOpcodes.second; opName << i << "_32";
1289  if (tm_.hasOperation(opName)) {
1290  supportedShifts.insert(i);
1291  }
1292  }
1293 
1294  // add also 1-bit shift for add
1295  // we should ALWAYS have an add but - lets check to be sure ;)
1296  if (tm_.hasOperation("ADD")) {
1297  supportedShifts.insert(1);
1298  }
1299 
1300  if (Amt.getOpcode() == ISD::Constant) {
1301  unsigned long amount = op.getConstantOperandVal(1);
1302  // if has no correct-width shift, need to break down into multiple.
1303  if (supportedShifts.find(amount) == supportedShifts.end()) {
1304  // find the biggest suitable shift.
1305  for (auto i = supportedShifts.rbegin();
1306  i != supportedShifts.rend(); i++) {
1307  if (amount > *i) {
1308  auto shiftVal =
1309  dag.getConstant(*i, op, Amt.getValueType());
1310  auto remVal =
1311  dag.getConstant(amount - *i, op, Amt.getValueType());
1312  SDValue remaining = dag.getNode(
1313  op.getOpcode(), op, op.getValueType(), R, remVal);
1314  SDValue lowered = LowerShift(remaining, dag);
1315  SDValue shift = dag.getNode(
1316  shiftOpcode, op, op.getValueType(), lowered, shiftVal);
1317  return shift;
1318  }
1319  }
1320  }
1321  return op;
1322 
1323  } else {
1324  unsigned Opc = op.getOpcode();
1325  switch(Opc) {
1326  case ISD::SRA:
1327  return ExpandLibCall(RTLIB::SRA_I32, op.getNode(), true, dag);
1328  case ISD::SRL:
1329  return ExpandLibCall(RTLIB::SRL_I32, op.getNode(), false, dag);
1330  case ISD::SHL:
1331  return ExpandLibCall(RTLIB::SHL_I32, op.getNode(), false, dag);
1332  default:
1333  std::cerr << "Invalid dynamic shift opcode" << std::endl;
1334  }
1335  }
1336  return op;
1337 }
1338 
/**
 * Repositions a hwloop (set_loop_iterations) intrinsic in the DAG so
 * that it sits directly in front of the branch of its basic block.
 *
 * If the intrinsic is already in place (last node, or linked straight
 * to a BR/HANDLENODE), the op is returned untouched and TDGen performs
 * the actual lowering. Otherwise the op and its single TokenFactor user
 * swap positions in the chain.
 *
 * @param op The INTRINSIC_VOID node carrying the hwloop intrinsic.
 * @param dag The selection DAG being lowered.
 * @return The (possibly re-chained) op.
 */
SDValue
TCETargetLowering::lowerHWLoops(SDValue op, SelectionDAG &dag) const {
    if (cast<ConstantSDNode>(op->getOperand(1))->getZExtValue() !=
        Intrinsic::set_loop_iterations) {
        std::cerr << "Trying to lower invalid hwloop instruction"
                  << std::endl;
        return op;
    }

    // If hwloop is last instruction, let TDGen do the lowering.
    if (op->use_empty()) return op;

    // If we see a jump after hwloop, we have the hwloop
    // in correct place. Let TDGen handle the hwloop lowering.
    if (op->use_size() > 1) {
        dag.dump();
        assert(false && "HWLoop should not have more than one Use");
    }
    auto linkNode = op->use_begin();
    auto linkOpc = linkNode->getOpcode();
    if (linkOpc == ISD::BR || linkOpc == ISD::HANDLENODE) {
        // hwloop follows branch (or) last instruction of BB,
        // No action needed.
        return op;
    }

    // Sanity check for known pattern. Expected patterns,
    //  - hwloop -> TokenFactor -> BR (or)
    //  - hwloop -> TokenFactor
    if (linkOpc != ISD::TokenFactor || linkNode->use_size() > 1) {
        dag.dump();
        assert(false && "HWLoop loop pattern not implemented.");
    }

    // Create HWLOOP operands with link to ISD::BR node
    // i.e. TokenFactor -> HWLOOP -> BR
    SmallVector<SDValue, 8> ops;
    SmallVector<SDValue, 8> linkOps;
    bool replaceLinkNode = false;
    for (int i = 0; i < op.getNumOperands(); i++) {
        // Swap the use list of op and linkNode
        if (i == 0) {
            // Set TokenFactor as 1st operand
            ops.push_back(SDValue(*linkNode, 0));

            // Create operand list for linkNode, replacing its use of op
            // with op's own chain operand.
            for (int j = 0; j < linkNode->getNumOperands(); j++) {
                if (linkNode->getOperand(j) == op) {
                    linkOps.push_back(op.getOperand(i));
                } else {
                    linkOps.push_back(linkNode->getOperand(j));
                }
            }
            replaceLinkNode = true;
        } else {
            // Keep rest of the operands as it is in hwloop
            ops.push_back(op.getOperand(i));
        }
    }
    SDLoc dl(op);
    // Redirect all users of the TokenFactor to op, then rewrite op's
    // operands so it now hangs below a rebuilt TokenFactor.
    dag.ReplaceAllUsesWith(*linkNode, &op);
    auto Chain = dag.UpdateNodeOperands(op.getNode(), ArrayRef<SDValue>(ops));
    if (replaceLinkNode) {
        SDValue newLinkNode = dag.getNode(
            linkNode->getOpcode(), dl, MVT::Other,
            ArrayRef<SDValue>(linkOps));
        dag.ReplaceAllUsesWith(*linkNode, &newLinkNode);
    }
    return op;
}
1409 
1410 /**
1411  * Handles custom operation lowerings.
1412  */
1413 SDValue
1414 TCETargetLowering::LowerOperation(SDValue op, SelectionDAG& dag) const {
1415  switch(op.getOpcode()) {
1416  case ISD::TRAP: return LowerTRAP(op, dag);
1417  case ISD::GlobalAddress: return LowerGLOBALADDRESS(op, dag);
1418  case ISD::BlockAddress: return LowerBlockAddress(op, dag);
1419  case ISD::VASTART: return LowerVASTART(op, dag);
1420  case ISD::ConstantPool: return LowerCONSTANTPOOL(op, dag);
1421  case ISD::Constant: return LowerConstant(op, dag);
1422  case ISD::BUILD_VECTOR: return LowerBuildVector(op, dag);
1423  case ISD::SHL:
1424  case ISD::SRA:
1425  case ISD::SRL: return LowerShift(op, dag);
1426  case ISD::LOAD: return lowerExtOrBoolLoad(op, dag);
1427  case ISD::INTRINSIC_VOID:
1428  return lowerHWLoops(op, dag);
1429  case ISD::DYNAMIC_STACKALLOC: {
1430  assert(false && "Dynamic stack allocation not yet implemented.");
1431  }
1432 #ifdef OLD_VECTOR_CODE
1433  case ISD::LOAD: return LowerLOAD(op, dag);
1434 #endif
1435  }
1436  op.getNode()->dump(&dag);
1437  assert(0 && "Custom lowerings not implemented!");
1438 }
1439 
/**
 * Target-specific DAG combine hook.
 *
 * Rewrites a BRCOND that is controlled by the loop_decrement intrinsic
 * into the custom TCE::LJUMP node. If a TokenFactor sits between the
 * decrement and the branch, the decrement is first pulled out of the
 * TokenFactor so it chains directly into the branch.
 *
 * @param N Node being combined.
 * @param DCI Combiner context (provides the DAG).
 * @return Always an empty SDValue: replacements are done in-place via
 *         SelectNodeTo/ReplaceAllUsesOfValueWith.
 */
SDValue
TCETargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
    SelectionDAG &DAG = DCI.DAG;
    SDLoc dl(N);
    switch (N->getOpcode()) {
        default:
            break;
        case ISD::BRCOND: {
            SDValue Cond = N->getOperand(1);
            SDValue Target = N->getOperand(2);
            // Corner case for decrement -> setcc -> brcond link
            if (Cond.getOpcode() == ISD::SETCC)
                Cond = Cond.getOperand(0);
            if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
                cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
                    Intrinsic::loop_decrement) {
                /// Replace the decrement use chain with it predecessor
                /// The decrement should connect to BRCOND node directly or
                /// via TokeneFactor. If it is connected via TokenFactor, move
                /// the decrement close to BRCOND by updating the chain
                auto chain = N->getOperand(0);
                // Correct form. no action needed
                if (chain.getNode() == Cond.getNode()) {
                    DAG.SelectNodeTo(N, TCE::LJUMP, MVT::Other,
                                     N->getOperand(2), Cond->getOperand(0));
                } else {
                    assert((chain.getOpcode() == ISD::TokenFactor) &&
                           "chain to brcond is not TokenFactor.");
                    assert((N->use_begin()->getOpcode() == ISD::BR) &&
                           "brcond is not connected to br.");
                    // Rebuild the TokenFactor without the decrement,
                    // splicing in the decrement's own input chain.
                    SmallVector<SDValue, 8> Ops;
                    bool hasDecrement = false;
                    for (unsigned i = 0, e = chain->getNumOperands(); i != e;
                         ++i) {
                        if (chain->getOperand(i).getNode() ==
                            Cond.getNode()) {
                            hasDecrement = true;
                            Ops.push_back(Cond->getOperand(0));
                        } else {
                            Ops.push_back(chain->getOperand(i));
                        }
                    }
                    assert(hasDecrement &&
                           "Unable to find Chain for loop decrement");
                    auto newChain = DAG.getNode(
                        ISD::TokenFactor, SDLoc(chain), MVT::Other, Ops);
                    DAG.ReplaceAllUsesOfValueWith(chain, newChain);

                    // Custom ISEL for LJUMP
                    DAG.UpdateNodeOperands(
                        N, newChain, N->getOperand(1), N->getOperand(2));
                    DAG.SelectNodeTo(
                        N, TCE::LJUMP, MVT::Other, N->getOperand(2),
                        newChain);
                }
            }
        }
    }
    return SDValue();
}
1500 
1501 //===----------------------------------------------------------------------===//
1502 // Inline Assembly Support
1503 //===----------------------------------------------------------------------===//
1504 
1505 /// getConstraintType - Given a constraint letter, return the type of
1506 /// constraint it is for this target.
1507 TCETargetLowering::ConstraintType
1508 TCETargetLowering::getConstraintType(StringRef Constraint) const {
1509  if (Constraint.size() == 1) {
1510  switch (Constraint[0]) {
1511  default: break;
1512  case 'r': return C_RegisterClass;
1513  }
1514  }
1515  return TargetLowering::getConstraintType(Constraint);
1516 }
1517 
// Returns the smallest vector register class that can legally hold VT,
// or nullptr for scalar types / unsupported vector types.
const TargetRegisterClass*
// NOTE(review): the declarator line (TCETargetLowering::<name>) was lost
// in this published listing — confirm the name against the repository
// source before editing.
    const TargetRegisterInfo* TRI,
    MVT VT) const {

    // Scalar types have no associated vector register class.
    if (!VT.isVector()) return nullptr;

    const TargetRegisterClass* bestVRC = nullptr;
    // Find smallest RF by using stack spill size as reg size indication.
    for (unsigned i = 0U; i < TRI->getNumRegClasses(); i++) {
        auto vrc = TRI->getRegClass(i);
        if (TRI->isTypeLegalForClass(*vrc, VT) &&
            (!bestVRC || vrc->MC->RegsSize < bestVRC->MC->RegsSize)) {
            bestVRC = vrc;
        }
    }
    // May still be nullptr when no class can hold VT.
    return bestVRC;
}
1536 
/**
 * Returns proper register class for given value type.
 *
 * @param Constraint A constraint defined for an inline asm operation operand.
 * @return Proper register class for the operand value type.
 */
std::pair<unsigned, const TargetRegisterClass *>
// NOTE(review): the declarator line (TCETargetLowering::
// getRegForInlineAsmConstraint) was lost in this published listing.
    const TargetRegisterInfo *TRI,
    StringRef Constraint, MVT VT) const {
    if (Constraint.size() == 1) {
        // check if value type is a vector and return associated reg class
        std::pair<unsigned, const TargetRegisterClass *> rcPair =
        // NOTE(review): the initializer line for rcPair was lost in this
        // published listing — confirm against the repository source.

        switch (Constraint[0]) {
        case 'r':
            // if found associated vector reg class
            if (rcPair.second != NULL) {
                return rcPair;
            }
        }
    }

    // "{RFNAME.IDX}" style constraints name a specific physical register.
    bool isPhysicalRegister = Constraint.size() > 3
        && Constraint.front() == '{' && Constraint.back() == '}';

    const TargetRegisterClass* vrc = nullptr;
    if (Constraint.size() == 1) {
        switch (Constraint[0]) {
        case 'r':
            // Prefer vector RFs for vector types and then try
            // scalar RFs.
            // NOTE(review): the statement assigning vrc was lost in this
            // published listing.
            if (vrc) return std::make_pair(0U, vrc);

            switch (VT.getSizeInBits()) {
            case 8:
            case 16:
            case 32:
            case 64:
                return std::make_pair(0U, &DEFAULT_REG_CLASS);
            default:
                break;
            }
            return std::make_pair(0U, nullptr); // return error.
        // TODO: this should be some other char. But change it in devel64b
        case 's':
            return std::make_pair(0U, &TCE::R64RegsRegClass);
        case 'f':
            if (VT == MVT::f32) {
                return std::make_pair(0U, &TCE::FPRegsRegClass);
            }
            // NOTE(review): falls through into 'd' on 64-bit targets when
            // VT != f32 — presumably intentional; confirm.
#ifdef TARGET64BIT
        case 'd':
            return std::make_pair(0U, &TCE::R64DFPRegsRegClass);
#endif
        }
    } else if (isPhysicalRegister) {
        // Constraint = {<RF-name>.<Register-index>}
        const std::string regName = Constraint.substr(1, Constraint.size()-2).str();
        unsigned regId = tm_.llvmRegisterId(regName);
        if (regId == TCE::NoRegister) {
            // No such register. Return error.
            return std::make_pair(0, nullptr);
        }

        // In case the reg is boolean register via local register
        // variable (ie. "register int foo asm("BOOL.1") = ...").
        if (TCE::R1RegsRegClass.contains(regId)) {
            return std::make_pair(regId, &TCE::R1RegsRegClass);
        }
        if (TCE::GuardRegsRegClass.contains(regId)) {
            return std::make_pair(regId, &TCE::GuardRegsRegClass);
        }

        return std::make_pair(regId, TRI->getMinimalPhysRegClass(regId, VT));
    }
    return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
1617 
1618 // For invalid constraint, like unsupported immediates, add nothing into Ops.
1619 void
1621  SDValue Op,
1622  std::string& Constraint,
1623  std::vector<SDValue>& Ops,
1624  SelectionDAG& DAG) const {
1625 
1626  if (Constraint.length() == 1) {
1627  switch (Constraint[0]) {
1628  case 'i':
1629  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
1630  if (!canEncodeImmediate(*C)) {
1631  return;
1632  }
1633  }
1634  break;
1635  default:
1636  break;
1637  }
1638  }
1639 
1640  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
1641 }
1642 
1643 std::vector<unsigned> TCETargetLowering::
1644 getRegClassForInlineAsmConstraint(const std::string &Constraint,
1645  EVT VT) const {
1646  if (Constraint.size() != 1)
1647  return std::vector<unsigned>();
1648 
1649  switch (Constraint[0]) {
1650  default: break;
1651  case 'r':
1652  // TODO: WHAT TO DO WITH THESE?
1653  return std::vector<unsigned>(1,0);
1654 
1655  }
1656 
1657  return std::vector<unsigned>();
1658 }
1659 
1660 bool
1661 TCETargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
1662  return false;
1663 }
1664 
1665 
1666 /*
1667 bool
1668  TCETargetLowering::allowsMisalignedMemoryAccesses(EVT, unsigned, unsigned, MachineMemOperand::Flags, bool*) const {
1669  /// @todo This commented area and the whole function is probably not
1670  /// needed anymore. The base class version returns false as default.
1671  return false;
1672 }
1673 */
1674 
1675 /**
1676  * Returns true if all operands of the SDNode are constants or undefined.
1677  */
1678 bool
1680  for (unsigned i = 0; i < node.getNumOperands(); i++) {
1681  auto opc = node.getOperand(i)->getOpcode();
1682  if (opc != ISD::Constant && opc != ISD::UNDEF) {
1683  return false;
1684  }
1685  }
1686  return true;
1687 }
1688 
1689 /**
1690  * Check if constant operands used by the SDNode can be encoded as immediate
1691  * on the target machine.
1692  */
1693 bool
1695  for (unsigned i = 0; i < node.getNumOperands(); i++) {
1696  if (node.getOperand(i)->getOpcode() != ISD::Constant) continue;
1697  ConstantSDNode* cn =
1698  cast<ConstantSDNode>(node.getOperand(i).getNode());
1699  if (!canEncodeImmediate(*cn)) return false;
1700  }
1701  return true;
1702 }
1703 
1704 /**
1705  * Check if the constant can be generally encoded as immediate
1706  * on the target machine.
1707  */
1708 bool
1709 TCETargetLowering::canEncodeImmediate(const ConstantSDNode& node) const {
1710  int64_t val = node.getSExtValue();
1711  MVT vt = node.getSimpleValueType(0);
1712 
1713  // We accept here only constant that can be materialized in instruction
1714  // selection in some way and this must be done by the lowest common
1715  // denominator.
1716 
1717  // can encode as MOVI?
1718  // Assuming here, that the immediate can be transported to any target
1719  // machine operation.
1720  if (tm_.canEncodeAsMOVI(vt, val))
1721  return true;
1722 
1723  // can encode as immediate to operation
1724  // TODO?
1725 
1726  // can encode as immToOp for user that is exactly known to be selected
1727  // to certain target instruction?
1728 
1729  // can encode through ISEL transformation?
1730  if (tm_.canMaterializeConstant(*node.getConstantIntValue()))
1731  return true;
1732 
1733  return false;
1734 }
1735 
1736 /**
1737  * Returns true if the address values should be loaded from constant pool due
1738  * to limited immediate support.
1739  *
1740  */
1741 bool
1743  if (loadGAFromConstantPool_.count(addressSpace) == 0) {
1744  // Default behavior for unspecified address spaces.
1745  assert(loadGAFromConstantPool_.count(0));
1746  return loadGAFromConstantPool_.at(0);
1747  }
1748 
1749  return loadGAFromConstantPool_.at(addressSpace);
1750 }
1751 
1752 /**
1753  * Returns true if the target machine has register class for i1 types.
1754  */
1755 bool
1757  if (TCE::R1RegsRegClass.getNumRegs() == 0) return false;
1758 
1759  // TDGen generates dummy register class for the machines without boolean
1760  // RFs.
1761  if (TCE::R1RegsRegClass.getNumRegs() == 1) {
1762  std::string regName = tm_.rfName(TCE::R1RegsRegClass.getRegister(0));
1763  if (regName.find("dummy") != std::string::npos) return false;
1764  }
1765 
1766  return true;
1767 }
1768 
1769 /**
1770  * Check the FP in bits can be fit in machine's immediates.
1771  */
1772 bool
1774  const APFloat& apf, EVT VT, bool forCodeSize) const {
1775  if (VT==MVT::f32 || VT==MVT::f16) {
1776  return tm_.canEncodeAsMOVF(apf);
1777  }
1778  return false;
1779 }
1780 
1781 bool
1783  if (n->getOpcode() != ISD::BUILD_VECTOR) {
1784  return false;
1785  }
1786  SDValue val = n->getOperand(0);
1787  int operandCount = n->getNumOperands();
1788  for (unsigned i = 1; i <operandCount; i++) {
1789  SDValue val2 = n->getOperand(i);
1790  SDNode* node2 = dyn_cast<SDNode>(val2);
1791  if (node2->getOpcode() != ISD::UNDEF) {
1792  if (val2 != val)
1793  return false;
1794  }
1795  }
1796  return true;
1797 }
1798 
1799 
// TODO: This is copypaste from legalizeDAG. Because the
// routine in legalizeDAG is not public
/**
 * Expands a node into a runtime library call.
 *
 * @param LC The libcall to emit.
 * @param Node Node whose operands become the call arguments.
 * @param isSigned Whether integer args/results use sign extension.
 * @param DAG The selection DAG.
 * @return The call result value, or the DAG root for tail calls.
 */
SDValue
 TCETargetLowering::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
                                  bool isSigned, SelectionDAG &DAG) const {

    TargetLowering::ArgListTy Args;
    TargetLowering::ArgListEntry Entry;
    // Each node operand becomes one (optionally extended) call argument.
    for (const SDValue &Op : Node->op_values()) {
        EVT ArgVT = Op.getValueType();
        Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
        Entry.Node = Op;
        Entry.Ty = ArgTy;
        Entry.IsSExt = shouldSignExtendTypeInLibCall(ArgVT, isSigned);
        Entry.IsZExt = !shouldSignExtendTypeInLibCall(ArgVT, isSigned);
        Args.push_back(Entry);
    }
    SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                           getPointerTy(DAG.getDataLayout(),0));

    EVT RetVT = Node->getValueType(0);
    Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());

    // By default, the input chain to this libcall is the entry node of the
    // function. If the libcall is going to be emitted as a tail call then
    // TLI.isUsedByReturnOnly will change it to the right chain if the return
    // node which is being folded has a non-entry input chain.
    SDValue InChain = DAG.getEntryNode();

    // isTailCall may be true since the callee does not reference caller stack
    // frame. Check if it's in the right position and that the return types match.
    SDValue TCChain = InChain;
    const Function &F = DAG.getMachineFunction().getFunction();
    bool isTailCall =
        isInTailCallPosition(DAG, Node, TCChain) &&
        (RetTy == F.getReturnType() || F.getReturnType()->isVoidTy());
    if (isTailCall)
        InChain = TCChain;

    TargetLowering::CallLoweringInfo CLI(DAG);
    bool signExtend = shouldSignExtendTypeInLibCall(RetVT, isSigned);
    CLI.setDebugLoc(SDLoc(Node))
        .setChain(InChain)
        .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee,
                      std::move(Args))
        .setTailCall(isTailCall)
        .setSExtResult(signExtend)
        .setZExtResult(!signExtend)
        .setIsPostTypeLegalization(true);

    std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

    if (!CallInfo.second.getNode()) {
        // It's a tailcall, return the chain (which is the DAG root).
        return DAG.getRoot();
    }

    return CallInfo.first;
}
1859 
1861  SDNode* node, SmallVectorImpl<SDValue>& Results,
1862  SelectionDAG& DAG) const {
1863  auto fnName = DAG.getMachineFunction().getName().str();
1864 
1865  SDValue shiftedVal;
1866  SDValue truncAnd;
1867  if (node->getOpcode() == ISD::LOAD) {
1868  auto lsdn = dyn_cast<LoadSDNode>(node);
1869  if (lsdn == nullptr) {
1870  std::cerr << "Error: null loadsdnde!" << std::endl;
1871  return;
1872  }
1873 
1874  if (lsdn->getAlignment() < 2 &&
1875  lsdn->getMemoryVT() != MVT::i8 && lsdn->getMemoryVT() != MVT::i1) {
1876  assert(0 && "Cannot lower 16-bit memory op with only one byte alignment");
1877  }
1878 
1879  auto chain = node->getOperand(0);
1880 
1881  SDValue load;
1882  SDValue lowBits;
1883  if (lsdn->getAlignment() >= 4) {
1884  load = DAG.getLoad(
1885  MVT::i32, node, chain, lsdn->getBasePtr(), MachinePointerInfo());
1886  } else {
1887  auto alignedAddr =
1888  DAG.getNode(
1889  ISD::AND, node, MVT::i32, lsdn->getBasePtr(),
1890  DAG.getConstant(-4l, node, MVT::i32));
1891 
1892  auto lowBytes = DAG.getNode(
1893  ISD::AND, node, MVT::i32, lsdn->getBasePtr(),
1894  DAG.getConstant(3l, node, MVT::i32));
1895 
1896  lowBits = DAG.getNode(
1897  ISD::SHL, node, MVT::i32, lowBytes,
1898  DAG.getConstant(3l, node, MVT::i32));
1899 
1900  load = DAG.getLoad(
1901  MVT::i32, node, chain, alignedAddr, MachinePointerInfo());
1902  }
1903 
1904  // TODO: breaks with 64 bits!
1905  // TODO: also breaks with 16-bit floats?
1906  MVT vt = node->getSimpleValueType(0);
1907  if (vt == MVT::i32) {
1908  assert(0 && "Result i32? this should be extload?");
1909  Results.push_back(SDValue(load));
1910  Results.push_back(SDValue(load.getNode(),1));
1911  return;
1912  }
1913 
1914  SDValue finalVal;
1915  if (lsdn->getExtensionType() == ISD::ZEXTLOAD) {
1916  shiftedVal = lsdn->getAlignment() < 4 ?
1917  DAG.getNode(ISD::SRA, node, MVT::i32, load, lowBits):
1918  load;
1919 
1920  if (lsdn->getMemoryVT() == MVT::i1) {
1921  finalVal = DAG.getNode(
1922  ISD::AND, node, MVT::i32, shiftedVal,
1923  DAG.getConstant(1l, node, MVT::i32));
1924  } else if (lsdn->getMemoryVT() == MVT::i8) {
1925  finalVal = DAG.getNode(
1926  ISD::AND, node, MVT::i32, shiftedVal,
1927  DAG.getConstant(255l, node, MVT::i32));
1928  } else {
1929  // TODO: 64-bit port needs to add option for 32-bit here.
1930  assert(0 && "Wrong memory vt in zextload!");
1931  }
1932  } else if (lsdn->getExtensionType() == ISD::SEXTLOAD) {
1933  if (lsdn->getMemoryVT() == MVT::i1) {
1934  auto shiftsLeft =
1935  DAG.getNode(ISD::SUB, node, MVT::i32,
1936  DAG.getConstant(31l, node, MVT::i32),lowBits);
1937  auto shiftUp = DAG.getNode(
1938  ISD::SHL, node, MVT::i32, load, shiftsLeft);
1939  finalVal = DAG.getNode(
1940  ISD::SRA, node, MVT::i32, shiftUp,
1941  DAG.getConstant(31l, node, MVT::i32));
1942  } else if (lsdn->getMemoryVT() == MVT::i8) {
1943  auto shiftsLeft =
1944  DAG.getNode(ISD::SUB, node, MVT::i32,
1945  DAG.getConstant(24l, node, MVT::i32),lowBits);
1946  auto shiftUp = DAG.getNode(
1947  ISD::SHL, node, MVT::i32, load, shiftsLeft);
1948  finalVal = DAG.getNode(
1949  ISD::SRA, node, MVT::i32, shiftUp,
1950  DAG.getConstant(24l, node, MVT::i32));
1951  } else {
1952  // TODO: 64-bit port needs to add option for 32-bit here.
1953  assert(0 && "Wrong memory vt in sextload!");
1954  }
1955  } else { // anyext/noext.
1956  finalVal = lsdn->getAlignment() < 4 ?
1957  DAG.getNode(ISD::SRA, node, MVT::i32, load, lowBits):
1958  load;
1959  }
1960 
1961  SDValue rv;
1962  if (vt == MVT::i16) {
1963  rv = DAG.getAnyExtOrTrunc(finalVal, node, MVT::i16);
1964  } else if (vt == MVT::i8) {
1965  rv = DAG.getAnyExtOrTrunc(finalVal, node, MVT::i8);
1966  } else if (vt == MVT::i1) {
1967  rv = DAG.getAnyExtOrTrunc(finalVal, node, MVT::i1);
1968  } else {
1969  assert(0 && "Wrong vt in load lowering!");
1970  }
1971 
1972  Results.push_back(rv);
1973  Results.push_back(SDValue(load.getNode(),1));
1974  } else {
1975  assert(false && "ReplaceNodeResults not load!");
1976  }
1977 }
1978 
1979 /**
1980  * Lowers extension load of 8- or 16-bit load to 32-bit little-endian load.
1981  */
1983  SDValue op,
1984  SelectionDAG& DAG) const {
1985 
1986  auto lsdn = dyn_cast<LoadSDNode>(op.getNode());
1987  if (lsdn == nullptr) {
1988  assert(false && "Not a lodsdnode on LowerExtLoad!");
1989  }
1990 
1991  auto chain = op.getOperand(0);
1992  SDValue alignedAddr;
1993  SDValue lowBits;
1994 
1995  if (lsdn->getAlignment() >= 4) {
1996  alignedAddr = lsdn->getBasePtr();
1997  lowBits = DAG.getConstant(0l, op, MVT::i32);
1998  } else {
1999  alignedAddr = DAG.getNode(
2000  ISD::AND, op, MVT::i32, lsdn->getBasePtr(),
2001  DAG.getConstant(-4l, op, MVT::i32));
2002 
2003  auto lowBytes = DAG.getNode(
2004  ISD::AND, op, MVT::i32, lsdn->getBasePtr(),
2005  DAG.getConstant(3l, op, MVT::i32));
2006 
2007  lowBits = DAG.getNode(
2008  ISD::SHL, op, MVT::i32, lowBytes,
2009  DAG.getConstant(3l, op, MVT::i32));
2010  }
2011 
2012  auto load = DAG.getLoad(
2013  MVT::i32, op, chain, alignedAddr, MachinePointerInfo());
2014 
2015  // this is little-endian code. big endian needs different.
2016  if (lsdn->getExtensionType() == ISD::ZEXTLOAD) {
2017  auto shiftedValue = lsdn->getAlignment() < 4 ?
2018  DAG.getNode(ISD::SRA, op, MVT::i32, load, lowBits) :
2019  load;
2020  if (lsdn->getMemoryVT() == MVT::i16) {
2021  assert(lsdn->getAlignment() >= 2 &&
2022  "Cannot (yet?) emulate a 16-bit load which has 1-byte alignment. "
2023  " 16-bit memory operations needed to compile this code." );
2024  std::cerr << "\t\tSource is 16 bits." << std::endl;
2025  auto zext = DAG.getNode(
2026  ISD::AND, op, MVT::i32, shiftedValue,
2027  DAG.getConstant(65535l, op, MVT::i32));
2028  return zext;
2029  } else if (lsdn->getMemoryVT() == MVT::i8) {
2030  auto zext = DAG.getNode(
2031  ISD::AND, op, MVT::i32, shiftedValue,
2032  DAG.getConstant(255l, op, MVT::i32));
2033  return zext;
2034  } else if (lsdn->getMemoryVT() == MVT::i1) {
2035  auto zext = DAG.getNode(
2036  ISD::AND, op, MVT::i32, shiftedValue,
2037  DAG.getConstant(1l, op, MVT::i32));
2038  return zext;
2039  } else {
2040  assert(false && "Unknown data type on LowerSExtLoad!");
2041  }
2042  }
2043  if (lsdn->getExtensionType() == ISD::SEXTLOAD) {
2044 
2045  // shift left to get it to upper bits, then arithmetic right.
2046  if (lsdn->getMemoryVT() == MVT::i16) {
2047  auto shiftsLeft = lsdn->getAlignment() < 4 ?
2048  DAG.getNode(ISD::SUB, op, MVT::i32,
2049  DAG.getConstant(16l, op, MVT::i32),
2050  lowBits) :
2051  DAG.getConstant(16l, op, MVT::i32);
2052  auto shiftUp = DAG.getNode(
2053  ISD::SHL, op, MVT::i32, load, shiftsLeft);
2054  auto shiftDown = DAG.getNode(
2055  ISD::SRA, op, MVT::i32, shiftUp,
2056  DAG.getConstant(16l, op, MVT::i32));
2057  return shiftDown;
2058  } else if (lsdn->getMemoryVT() == MVT::i8) {
2059  auto shiftsLeft = lsdn->getAlignment() < 4 ?
2060  DAG.getNode(ISD::SUB, op, MVT::i32,
2061  DAG.getConstant(24l, op, MVT::i32),
2062  lowBits) :
2063  DAG.getConstant(24l, op, MVT::i32);
2064  auto shiftUp =
2065  DAG.getNode(ISD::SHL, op, MVT::i32, load, shiftsLeft);
2066  auto shiftDown = DAG.getNode(
2067  ISD::SRA, op, MVT::i32, shiftUp,
2068  DAG.getConstant(24l, op, MVT::i32));
2069  return shiftDown;
2070  } else if (lsdn->getMemoryVT() == MVT::i1) {
2071  auto shiftsLeft = lsdn->getAlignment() < 4 ?
2072  DAG.getNode(ISD::SUB, op, MVT::i32,
2073  DAG.getConstant(31l, op, MVT::i32),
2074  lowBits) :
2075  DAG.getConstant(31l, op, MVT::i32);
2076 
2077  auto shiftUp =
2078  DAG.getNode(ISD::SHL, op, MVT::i32, load, shiftsLeft);
2079  auto shiftDown = DAG.getNode(
2080  ISD::SRA, op, MVT::i32, shiftUp,
2081  DAG.getConstant(31l, op, MVT::i32));
2082  return shiftDown;
2083  } else {
2084  assert(false && "Unknown data type on Lower(Z)ExtLoad!");
2085  }
2086  }
2087 
2088  // anyext?
2089  if (lsdn->getExtensionType() == ISD::EXTLOAD) {
2090  auto shiftedValue = lsdn->getAlignment() < 4 ?
2091  DAG.getNode(ISD::SRA, op, MVT::i32, load, lowBits) :
2092  load;
2093  auto shiftDown = DAG.getNode(ISD::SRA, op, MVT::i32, load, lowBits);
2094  return shiftDown;
2095  } else {
2096  // normal, not-extload.
2097  MVT vt = op->getSimpleValueType(0);
2098  if (vt == MVT::i1 && lsdn->getMemoryVT() == MVT::i1) {
2099  SDValue trunc = DAG.getAnyExtOrTrunc(load, op, MVT::i1);
2100  return trunc;
2101  }
2102 
2103  assert(false && "Should not be here, non-ext-load");
2104  }
2105  return SDValue();
2106 }
TCEISD::SELECT_F64
@ SELECT_F64
Definition: TCEISelLowering.hh:54
llvm::TCETargetMachine::rfName
std::string rfName(unsigned dwarfRegNum) const
Definition: TCETargetMachine.hh:204
llvm::TCETargetLowering::PerformDAGCombine
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
Definition: TCEISelLowering.cc:1441
llvm::TCETargetLowering::LowerVASTART
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const
Definition: TCEISelLowering.cc:1160
llvm::TCETargetMachine::canEncodeAsMOVF
bool canEncodeAsMOVF(const llvm::APFloat &fp) const
Definition: TCETargetMachine.cc:382
llvm
Definition: InlineAsmParser.hh:49
llvm::TCETargetMachine::customLegalizedOperations
const std::set< std::pair< unsigned, llvm::MVT::SimpleValueType > > * customLegalizedOperations()
Definition: TCETargetMachine.cc:340
llvm::TCETargetMachine::llvmRegisterId
unsigned llvmRegisterId(const TCEString &ttaRegister)
Definition: TCETargetMachine.hh:221
TCEISD::SELECT_I8
@ SELECT_I8
Definition: TCEISelLowering.hh:48
TCEISD::SELECT_F32
@ SELECT_F32
Definition: TCEISelLowering.hh:53
TCEISD::SRA_Const
@ SRA_Const
Definition: TCEISelLowering.hh:68
llvm::TCETargetLowering::LowerBlockAddress
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
Definition: TCEISelLowering.cc:956
llvm::TCETargetLowering::LowerAsmOperandForConstraint
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Definition: TCEISelLowering.cc:1620
TCEISD::SELECT_I64
@ SELECT_I64
Definition: TCEISelLowering.hh:51
llvm::TCETargetMachine::canMaterializeConstant
bool canMaterializeConstant(const ConstantInt &ci) const
Definition: TCETargetMachine.hh:321
llvm::TCETargetLowering::isConstantOrUndefBuild
bool isConstantOrUndefBuild(const SDNode &node) const
Definition: TCEISelLowering.cc:1679
DEFAULT_SIZE
#define DEFAULT_SIZE
Definition: TCEISelLowering.cc:80
llvm::TCESubtarget
Definition: TCESubtarget.hh:56
llvm::TCETargetLowering::getSetCCResultType
llvm::EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Definition: TCEISelLowering.cc:1183
llvm::TCETargetLowering::isFPImmLegal
virtual bool isFPImmLegal(const APFloat &apf, EVT VT, bool forCodeSize) const override
Definition: TCEISelLowering.cc:1773
AddressSpace.hh
MachineInfo.hh
llvm::TCETargetLowering::LowerFormalArguments
virtual SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, SDLOC_PARAM_TYPE dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
getFunctionAlignment - Return the Log2 alignment of this function.
Definition: TCEISelLowering.cc:145
llvm::TCETargetLowering::lowerExtOrBoolLoad
SDValue lowerExtOrBoolLoad(SDValue op, SelectionDAG &DAG) const
Definition: TCEISelLowering.cc:1982
Application::verboseLevel
static int verboseLevel()
Definition: Application.hh:176
LowerCONSTANTPOOL
static SDValue LowerCONSTANTPOOL(SDValue Op, SelectionDAG &DAG)
Definition: TCEISelLowering.cc:966
llvm::TCETargetLowering::LowerConstant
SDValue LowerConstant(SDValue Op, SelectionDAG &DAG) const
Definition: TCEISelLowering.cc:985
Application::logStream
static std::ostream & logStream()
Definition: Application.cc:155
LLVMTCECmdLineOptions::conservativePreRAScheduler
bool conservativePreRAScheduler() const
Definition: LLVMTCECmdLineOptions.cc:339
TCEISD::SHL_Const
@ SHL_Const
Definition: TCEISelLowering.hh:70
llvm::TCETargetLowering::LowerBuildVector
SDValue LowerBuildVector(SDValue Op, SelectionDAG &DAG) const
Definition: TCEISelLowering.cc:1033
llvm::TCETargetMachine::has8bitLoads
bool has8bitLoads() const
Definition: TCETargetMachine.hh:286
llvm::TCETargetLowering::isBroadcast
static bool isBroadcast(SDNode *n)
Definition: TCEISelLowering.cc:1782
TCERegisterInfo.hh
TCETargetObjectFile.hh
TCEISD::RET_FLAG
@ RET_FLAG
Definition: TCEISelLowering.hh:66
llvm::TCETargetMachine::hasOperation
bool hasOperation(TCEString operationName) const
Definition: TCETargetMachine.hh:200
llvm::TCETargetLowering::LowerShift
SDValue LowerShift(SDValue op, SelectionDAG &dag) const
Definition: TCEISelLowering.cc:1274
assert
#define assert(condition)
Definition: Application.hh:86
llvm::TCETargetLowering::addVectorRegisterClasses
void addVectorRegisterClasses()
Implementation generated to Backend.inc from TDGenSIMD.cc.
TCEISD::CALL
@ CALL
Definition: TCEISelLowering.hh:65
llvm::TCETargetLowering::LowerTRAP
SDValue LowerTRAP(SDValue Op, SelectionDAG &DAG) const
Definition: TCEISelLowering.cc:893
llvm::TCESubtarget::getRegisterInfo
virtual const TargetRegisterInfo * getRegisterInfo() const override
Definition: TCESubtarget.cc:95
llvm::TCETargetLowering::LowerReturn
virtual SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, SDLOC_PARAM_TYPE dl, SelectionDAG &DAG) const override
Definition: TCEISelLowering.cc:97
llvm::TCETargetLowering::canEncodeImmediate
bool canEncodeImmediate(const ConstantSDNode &node) const
Definition: TCEISelLowering.cc:1709
TCEISD::SELECT_I32
@ SELECT_I32
Definition: TCEISelLowering.hh:50
llvm::TCETargetLowering::TCETargetLowering
TCETargetLowering(TargetMachine &TM, const TCESubtarget &subt)
Definition: TCEISelLowering.cc:528
LLVMTCECmdLineOptions.hh
TCETargetMachine.hh
DEFAULT_REG_CLASS
#define DEFAULT_REG_CLASS
Definition: TCEISelLowering.cc:81
llvm::TCETargetLowering::associatedVectorRegClass
std::pair< unsigned, const TargetRegisterClass * > associatedVectorRegClass(const EVT &vt) const
Implementation generated to Backend.inc from TDGenSIMD.cc.
Application.hh
Application::cmdLineOptions
static CmdLineOptions * cmdLineOptions()
Definition: Application.cc:397
llvm::TCETargetLowering::addVectorLowerings
void addVectorLowerings()
Implementation generated to Backend.inc from TDGenSIMD.cc.
llvm::TCETargetLowering::isOffsetFoldingLegal
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Definition: TCEISelLowering.cc:1661
TCEISD::SELECT_I1
@ SELECT_I1
Definition: TCEISelLowering.hh:47
llvm::TCETargetLowering::ExpandLibCall
SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned, SelectionDAG &DAG) const
Definition: TCEISelLowering.cc:1803
llvm::TCETargetMachine::missingOperations
const std::set< std::pair< unsigned, llvm::MVT::SimpleValueType > > * missingOperations()
Definition: TCETargetMachine.cc:319
llvm::TCETargetLowering::getRegClassForInlineAsmConstraint
std::vector< unsigned > getRegClassForInlineAsmConstraint(const std::string &Constraint, EVT VT) const
Definition: TCEISelLowering.cc:1644
llvm::TCETargetLowering::getVectorRegClassForInlineAsmConstraint
const TargetRegisterClass * getVectorRegClassForInlineAsmConstraint(const TargetRegisterInfo *TRI, MVT VT) const
Definition: TCEISelLowering.cc:1519
Machine.hh
llvm::TCETargetLowering::tm_
TCETargetMachine & tm_
Definition: TCEISelLowering.hh:176
TTAMachine::Machine::addressSpaceNavigator
virtual AddressSpaceNavigator addressSpaceNavigator() const
Definition: Machine.cc:392
llvm::TCETargetLowering::loadGAFromConstantPool_
std::map< unsigned, bool > loadGAFromConstantPool_
Predicates to tell whenever the addresses belonging to a address space should be loaded from constant...
Definition: TCEISelLowering.hh:181
LLVMTCECmdLineOptions
Definition: LLVMTCECmdLineOptions.hh:48
llvm::TCETargetLowering::lowerHWLoops
SDValue lowerHWLoops(SDValue op, SelectionDAG &dag) const
Definition: TCEISelLowering.cc:1340
llvm::TCETargetLowering::ReplaceNodeResults
void ReplaceNodeResults(SDNode *node, SmallVectorImpl< SDValue > &, SelectionDAG &) const override
Definition: TCEISelLowering.cc:1860
DEFAULT_TYPE
#define DEFAULT_TYPE
Definition: TCEISelLowering.cc:78
llvm::TCETargetLowering::LowerCall
virtual SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
Definition: TCEISelLowering.cc:323
TCEISD::CONST_POOL
@ CONST_POOL
Definition: TCEISelLowering.hh:56
TCEISD::BLOCK_ADDR
@ BLOCK_ADDR
Definition: TCEISelLowering.hh:58
llvm::TCETargetLowering::getRegForInlineAsmConstraint
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Definition: TCEISelLowering.cc:1544
TCEISelLowering.hh
TCEISD::SELECT_F16
@ SELECT_F16
Definition: TCEISelLowering.hh:52
llvm::TCETargetMachine::canEncodeAsMOVI
bool canEncodeAsMOVI(const llvm::MVT &vt, int64_t val) const
Definition: TCETargetMachine.cc:359
llvm::TCETargetLowering::getSetCCResultVT
virtual llvm::EVT getSetCCResultVT(const EVT &VT) const
Implementation generated to Backend.inc from TDGenSIMD.cc.
LLVMTCECmdLineOptions::disableHWLoops
bool disableHWLoops() const
Definition: LLVMTCECmdLineOptions.cc:458
TCEPlugin.hh
TCEString
Definition: TCEString.hh:53
llvm::TCETargetLowering::LowerOperation
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
Definition: TCEISelLowering.cc:1414
TCEISD::SRL_Const
@ SRL_Const
Definition: TCEISelLowering.hh:69
TCEISD::SELECT_I16
@ SELECT_I16
Definition: TCEISelLowering.hh:49
R
static RegisterPass< MachineDCE > R("machinedce","Symbol string based machine DCE for removing not used emulation functions", false, true)
llvm::TCETargetLowering::getTargetNodeName
virtual const char * getTargetNodeName(unsigned opcode) const override
Definition: TCEISelLowering.cc:873
llvm::TCETargetMachine::ttaMachine
virtual const TTAMachine::Machine & ttaMachine() const
Definition: TCETargetMachine.hh:137
MachineInfo::supportsPortGuardedJumps
static bool supportsPortGuardedJumps(const TTAMachine::Machine &machine)
Definition: MachineInfo.cc:663
llvm::TCETargetLowering::getConstraintType
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
Definition: TCEISelLowering.cc:1508
llvm::TCETargetMachine
Definition: TCETargetMachine.hh:106
llvm::TCETargetMachine::stackAlignment
unsigned stackAlignment() const
Definition: TCETargetMachine.hh:277
llvm::TCETargetLowering::hasI1RegisterClass
bool hasI1RegisterClass() const
Definition: TCEISelLowering.cc:1756
llvm::TCETargetLowering::canEncodeConstantOperands
bool canEncodeConstantOperands(const SDNode &node) const
Definition: TCEISelLowering.cc:1694
llvm::TCETargetLowering::getVarArgsFrameOffset
int getVarArgsFrameOffset() const
Definition: TCEISelLowering.hh:92
llvm::TCETargetLowering::getConstShiftNodeAndTCEOP
std::pair< int, TCEString > getConstShiftNodeAndTCEOP(SDValue op) const
Definition: TCEISelLowering.cc:1260
TCEISD::FTOI
@ FTOI
Definition: TCEISelLowering.hh:62
llvm::TCETargetMachine::promotedOperations
const std::set< std::pair< unsigned, llvm::MVT::SimpleValueType > > * promotedOperations()
Definition: TCETargetMachine.cc:331
llvm::TCETargetLowering::LowerBuildBooleanVectorVector
SDValue LowerBuildBooleanVectorVector(SDValue Op, MVT newElementVT, int elemCount, SelectionDAG &DAG) const
Definition: TCEISelLowering.cc:1004
llvm::TCETargetLowering::hasI1RC_
bool hasI1RC_
Tells if the target machine has boolean register file.
Definition: TCEISelLowering.hh:174
TCEISD::ITOF
@ ITOF
Definition: TCEISelLowering.hh:63
MachineInfo::supportsBoolRegisterGuardedJumps
static bool supportsBoolRegisterGuardedJumps(const TTAMachine::Machine &machine)
Definition: MachineInfo.cc:616
llvm::TCETargetLowering::VarArgsFrameOffset
int VarArgsFrameOffset
Definition: TCEISelLowering.hh:84
llvm::TCETargetMachine::has16bitLoads
bool has16bitLoads() const
Definition: TCETargetMachine.hh:290
llvm::TCETargetLowering::shouldLoadFromConstantPool
bool shouldLoadFromConstantPool(unsigned addressSpace) const
Definition: TCEISelLowering.cc:1742
TCESubtarget.hh
SDLOC_PARAM_TYPE
#define SDLOC_PARAM_TYPE
Definition: TCEISelLowering.hh:76
llvm::TCETargetLowering::LowerGLOBALADDRESS
SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const
Definition: TCEISelLowering.cc:923
llvm::TCETargetMachine::largestImmValue
uint64_t largestImmValue() const
Definition: TCETargetMachine.hh:314
TCEISD::GLOBAL_ADDR
@ GLOBAL_ADDR
Definition: TCEISelLowering.hh:57