LLVM 23.0.0git
RISCVInstructionSelector.cpp
Go to the documentation of this file.
1//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the InstructionSelector class for
10/// RISC-V.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
16#include "RISCVSubtarget.h"
17#include "RISCVTargetMachine.h"
25#include "llvm/IR/IntrinsicsRISCV.h"
26#include "llvm/Support/Debug.h"
27
28#define DEBUG_TYPE "riscv-isel"
29
30using namespace llvm;
31using namespace MIPatternMatch;
32
33#define GET_GLOBALISEL_PREDICATE_BITSET
34#include "RISCVGenGlobalISel.inc"
35#undef GET_GLOBALISEL_PREDICATE_BITSET
36
37namespace {
38
39class RISCVInstructionSelector : public InstructionSelector {
40public:
41 RISCVInstructionSelector(const RISCVTargetMachine &TM,
42 const RISCVSubtarget &STI,
43 const RISCVRegisterBankInfo &RBI);
44
45 bool select(MachineInstr &MI) override;
46
47 void setupMF(MachineFunction &MF, GISelValueTracking *VT,
48 CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
49 BlockFrequencyInfo *BFI) override {
50 InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
51 MRI = &MF.getRegInfo();
52 }
53
54 static const char *getName() { return DEBUG_TYPE; }
55
56private:
58 getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;
59
60 static constexpr unsigned MaxRecursionDepth = 6;
61
62 bool hasAllNBitUsers(const MachineInstr &MI, unsigned Bits,
63 const unsigned Depth = 0) const;
64 bool hasAllHUsers(const MachineInstr &MI) const {
65 return hasAllNBitUsers(MI, 16);
66 }
67 bool hasAllWUsers(const MachineInstr &MI) const {
68 return hasAllNBitUsers(MI, 32);
69 }
70
71 bool isRegInGprb(Register Reg) const;
72 bool isRegInFprb(Register Reg) const;
73
74 // tblgen-erated 'select' implementation, used as the initial selector for
75 // the patterns that don't require complex C++.
76 bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
77
78 // A lowering phase that runs before any selection attempts.
79 // Returns true if the instruction was modified.
80 void preISelLower(MachineInstr &MI);
81
82 bool replacePtrWithInt(MachineOperand &Op);
83
84 // Custom selection methods
85 bool selectCopy(MachineInstr &MI) const;
86 bool selectImplicitDef(MachineInstr &MI) const;
87 bool materializeImm(Register Reg, int64_t Imm, MachineInstr &MI) const;
88 bool selectAddr(MachineInstr &MI, bool IsLocal = true,
89 bool IsExternWeak = false) const;
90 bool selectSelect(MachineInstr &MI) const;
91 bool selectFPCompare(MachineInstr &MI) const;
92 void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
93 MachineInstr &MI) const;
95 void addVectorLoadStoreOperands(MachineInstr &I,
97 unsigned &CurOp, bool IsMasked,
98 bool IsStridedOrIndexed,
99 LLT *IndexVT = nullptr) const;
100 bool selectIntrinsicWithSideEffects(MachineInstr &I) const;
101 bool selectIntrinsic(MachineInstr &I) const;
102 bool selectExtractSubvector(MachineInstr &MI) const;
103
104 ComplexRendererFns selectShiftMask(MachineOperand &Root,
105 unsigned ShiftWidth) const;
106 ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
107 return selectShiftMask(Root, STI.getXLen());
108 }
109 ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
110 return selectShiftMask(Root, 32);
111 }
112 ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;
113
114 ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
115 template <unsigned Bits>
116 ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
117 return selectSExtBits(Root, Bits);
118 }
119
120 ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
121 template <unsigned Bits>
122 ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
123 return selectZExtBits(Root, Bits);
124 }
125
126 ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
127 template <unsigned ShAmt>
128 ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
129 return selectSHXADDOp(Root, ShAmt);
130 }
131
132 ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
133 unsigned ShAmt) const;
134 template <unsigned ShAmt>
135 ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
136 return selectSHXADD_UWOp(Root, ShAmt);
137 }
138
139 ComplexRendererFns renderVLOp(MachineOperand &Root) const;
140
141 // Custom renderers for tablegen
142 void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
143 int OpIdx) const;
144 void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
145 int OpIdx) const;
146 void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
147 int OpIdx) const;
148 void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
149 int OpIdx) const;
150 void renderFrameIndex(MachineInstrBuilder &MIB, const MachineInstr &MI,
151 int OpIdx) const;
152
153 void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
154 int OpIdx) const;
155 void renderXLenSubTrailingOnes(MachineInstrBuilder &MIB,
156 const MachineInstr &MI, int OpIdx) const;
157
158 void renderAddiPairImmLarge(MachineInstrBuilder &MIB, const MachineInstr &MI,
159 int OpIdx) const;
160 void renderAddiPairImmSmall(MachineInstrBuilder &MIB, const MachineInstr &MI,
161 int OpIdx) const;
162
163 const RISCVSubtarget &STI;
164 const RISCVInstrInfo &TII;
165 const RISCVRegisterInfo &TRI;
166 const RISCVRegisterBankInfo &RBI;
167 const RISCVTargetMachine &TM;
168
169 MachineRegisterInfo *MRI = nullptr;
170
171 // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
172 // uses "STI." in the code generated by TableGen. We need to unify the name of
173 // Subtarget variable.
174 const RISCVSubtarget *Subtarget = &STI;
175
176#define GET_GLOBALISEL_PREDICATES_DECL
177#include "RISCVGenGlobalISel.inc"
178#undef GET_GLOBALISEL_PREDICATES_DECL
179
180#define GET_GLOBALISEL_TEMPORARIES_DECL
181#include "RISCVGenGlobalISel.inc"
182#undef GET_GLOBALISEL_TEMPORARIES_DECL
183};
184
185} // end anonymous namespace
186
187#define GET_GLOBALISEL_IMPL
188#include "RISCVGenGlobalISel.inc"
189#undef GET_GLOBALISEL_IMPL
190
191RISCVInstructionSelector::RISCVInstructionSelector(
192 const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
193 const RISCVRegisterBankInfo &RBI)
194 : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
195 TM(TM),
196
198#include "RISCVGenGlobalISel.inc"
201#include "RISCVGenGlobalISel.inc"
203{
204}
205
// Mimics optimizations in ISel and RISCVOptWInst Pass
//
// Returns true when every user of MI's destination register can be shown to
// read only the low \p Bits bits of the value. Any user not handled by the
// switch below conservatively makes this return false. AND/OR/XOR (and ANDI
// when its immediate doesn't already prove the property) recurse on their own
// users, bounded by MaxRecursionDepth.
bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,
                                               unsigned Bits,
                                               const unsigned Depth) const {

  // At Depth 0 we only expect the opcodes this analysis is invoked for;
  // recursive calls (Depth != 0) may visit any opcode.
  assert((MI.getOpcode() == TargetOpcode::G_ADD ||
          MI.getOpcode() == TargetOpcode::G_SUB ||
          MI.getOpcode() == TargetOpcode::G_MUL ||
          MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_AND ||
          MI.getOpcode() == TargetOpcode::G_OR ||
          MI.getOpcode() == TargetOpcode::G_XOR ||
          MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&
         "Unexpected opcode");

  if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)
    return false;

  auto DestReg = MI.getOperand(0).getReg();
  for (auto &UserOp : MRI->use_nodbg_operands(DestReg)) {
    assert(UserOp.getParent() && "UserOp must have a parent");
    const MachineInstr &UserMI = *UserOp.getParent();
    unsigned OpIdx = UserOp.getOperandNo();

    switch (UserMI.getOpcode()) {
    default:
      // Unknown user: must assume all bits are demanded.
      return false;
    case RISCV::ADDW:
    case RISCV::ADDIW:
    case RISCV::SUBW:
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_S_W:
      // W-form instructions only read the low 32 bits of their inputs.
      if (Bits >= 32)
        break;
      return false;
    case RISCV::SLL:
    case RISCV::SRA:
    case RISCV::SRL:
      // Shift amount operands only use log2(Xlen) bits.
      if (OpIdx == 2 && Bits >= Log2_32(Subtarget->getXLen()))
        break;
      return false;
    case RISCV::SLLI:
      // SLLI only uses the lower (XLen - ShAmt) bits.
      if (Bits >= Subtarget->getXLen() - UserMI.getOperand(2).getImm())
        break;
      return false;
    case RISCV::ANDI:
      // ANDI demands only as many low bits as its immediate is wide; if the
      // immediate doesn't settle it, fall back to checking ANDI's own users.
      if (Bits >= (unsigned)llvm::bit_width<uint64_t>(
                      (uint64_t)UserMI.getOperand(2).getImm()))
        break;
      goto RecCheck;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    RecCheck:
      // Bitwise ops pass demanded bits through: recurse on their users.
      if (hasAllNBitUsers(UserMI, Bits, Depth + 1))
        break;
      return false;
    case RISCV::SRLI: {
      unsigned ShAmt = UserMI.getOperand(2).getImm();
      // If we are shifting right by less than Bits, and users don't demand any
      // bits that were shifted into [Bits-1:0], then we can consider this as an
      // N-Bit user.
      if (Bits > ShAmt && hasAllNBitUsers(UserMI, Bits - ShAmt, Depth + 1))
        break;
      return false;
    }
    }
  }

  return true;
}
280
281InstructionSelector::ComplexRendererFns
282RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
283 unsigned ShiftWidth) const {
284 if (!Root.isReg())
285 return std::nullopt;
286
287 using namespace llvm::MIPatternMatch;
288
289 Register ShAmtReg = Root.getReg();
290 // Peek through zext.
291 Register ZExtSrcReg;
292 if (mi_match(ShAmtReg, *MRI, m_GZExt(m_Reg(ZExtSrcReg))))
293 ShAmtReg = ZExtSrcReg;
294
295 APInt AndMask;
296 Register AndSrcReg;
297 // Try to combine the following pattern (applicable to other shift
298 // instructions as well as 32-bit ones):
299 //
300 // %4:gprb(s64) = G_AND %3, %2
301 // %5:gprb(s64) = G_LSHR %1, %4(s64)
302 //
303 // According to RISC-V's ISA manual, SLL, SRL, and SRA ignore other bits than
304 // the lowest log2(XLEN) bits of register rs2. As for the above pattern, if
305 // the lowest log2(XLEN) bits of register rd and rs2 of G_AND are the same,
306 // then it can be eliminated. Given register rs1 or rs2 holding a constant
307 // (the and mask), there are two cases G_AND can be erased:
308 //
309 // 1. the lowest log2(XLEN) bits of the and mask are all set
310 // 2. the bits of the register being masked are already unset (zero set)
311 if (mi_match(ShAmtReg, *MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
312 APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
313 if (ShMask.isSubsetOf(AndMask)) {
314 ShAmtReg = AndSrcReg;
315 } else {
316 // SimplifyDemandedBits may have optimized the mask so try restoring any
317 // bits that are known zero.
318 KnownBits Known = VT->getKnownBits(AndSrcReg);
319 if (ShMask.isSubsetOf(AndMask | Known.Zero))
320 ShAmtReg = AndSrcReg;
321 }
322 }
323
324 APInt Imm;
326 if (mi_match(ShAmtReg, *MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
327 if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
328 // If we are shifting by X+N where N == 0 mod Size, then just shift by X
329 // to avoid the ADD.
330 ShAmtReg = Reg;
331 } else if (mi_match(ShAmtReg, *MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
332 if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
333 // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
334 // to generate a NEG instead of a SUB of a constant.
335 ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
336 unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
337 return {{[=](MachineInstrBuilder &MIB) {
338 MachineIRBuilder(*MIB.getInstr())
339 .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
340 MIB.addReg(ShAmtReg);
341 }}};
342 }
343 if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
344 // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
345 // to generate a NOT instead of a SUB of a constant.
346 ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
347 return {{[=](MachineInstrBuilder &MIB) {
348 MachineIRBuilder(*MIB.getInstr())
349 .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
350 .addImm(-1);
351 MIB.addReg(ShAmtReg);
352 }}};
353 }
354 }
355
356 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
357}
358
359InstructionSelector::ComplexRendererFns
360RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
361 unsigned Bits) const {
362 if (!Root.isReg())
363 return std::nullopt;
364 Register RootReg = Root.getReg();
365 MachineInstr *RootDef = MRI->getVRegDef(RootReg);
366
367 if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
368 RootDef->getOperand(2).getImm() == Bits) {
369 return {
370 {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};
371 }
372
373 unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
374 if ((Size - VT->computeNumSignBits(RootReg)) < Bits)
375 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
376
377 return std::nullopt;
378}
379
380InstructionSelector::ComplexRendererFns
381RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
382 unsigned Bits) const {
383 if (!Root.isReg())
384 return std::nullopt;
385 Register RootReg = Root.getReg();
386
387 Register RegX;
388 uint64_t Mask = maskTrailingOnes<uint64_t>(Bits);
389 if (mi_match(RootReg, *MRI, m_GAnd(m_Reg(RegX), m_SpecificICst(Mask)))) {
390 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
391 }
392
393 if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
394 MRI->getType(RegX).getScalarSizeInBits() == Bits)
395 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
396
397 unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
398 if (VT->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
399 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
400
401 return std::nullopt;
402}
403
404InstructionSelector::ComplexRendererFns
405RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
406 unsigned ShAmt) const {
407 using namespace llvm::MIPatternMatch;
408
409 if (!Root.isReg())
410 return std::nullopt;
411 Register RootReg = Root.getReg();
412
413 const unsigned XLen = STI.getXLen();
414 APInt Mask, C2;
415 Register RegY;
416 std::optional<bool> LeftShift;
417 // (and (shl y, c2), mask)
418 if (mi_match(RootReg, *MRI,
419 m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
420 LeftShift = true;
421 // (and (lshr y, c2), mask)
422 else if (mi_match(RootReg, *MRI,
423 m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
424 LeftShift = false;
425
426 if (LeftShift.has_value()) {
427 if (*LeftShift)
429 else
431
432 if (Mask.isShiftedMask()) {
433 unsigned Leading = XLen - Mask.getActiveBits();
434 unsigned Trailing = Mask.countr_zero();
435 // Given (and (shl y, c2), mask) in which mask has no leading zeros and
436 // c3 trailing zeros. We can use an SRLI by c3 - c2 followed by a SHXADD.
437 if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
438 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
439 return {{[=](MachineInstrBuilder &MIB) {
440 MachineIRBuilder(*MIB.getInstr())
441 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
442 .addImm(Trailing - C2.getLimitedValue());
443 MIB.addReg(DstReg);
444 }}};
445 }
446
447 // Given (and (lshr y, c2), mask) in which mask has c2 leading zeros and
448 // c3 trailing zeros. We can use an SRLI by c2 + c3 followed by a SHXADD.
449 if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
450 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
451 return {{[=](MachineInstrBuilder &MIB) {
452 MachineIRBuilder(*MIB.getInstr())
453 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
454 .addImm(Leading + Trailing);
455 MIB.addReg(DstReg);
456 }}};
457 }
458 }
459 }
460
461 LeftShift.reset();
462
463 // (shl (and y, mask), c2)
464 if (mi_match(RootReg, *MRI,
465 m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
466 m_ICst(C2))))
467 LeftShift = true;
468 // (lshr (and y, mask), c2)
469 else if (mi_match(RootReg, *MRI,
471 m_ICst(C2))))
472 LeftShift = false;
473
474 if (LeftShift.has_value() && Mask.isShiftedMask()) {
475 unsigned Leading = XLen - Mask.getActiveBits();
476 unsigned Trailing = Mask.countr_zero();
477
478 // Given (shl (and y, mask), c2) in which mask has 32 leading zeros and
479 // c3 trailing zeros. If c1 + c3 == ShAmt, we can emit SRLIW + SHXADD.
480 bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
481 (Trailing + C2.getLimitedValue()) == ShAmt;
482 if (!Cond)
483 // Given (lshr (and y, mask), c2) in which mask has 32 leading zeros and
484 // c3 trailing zeros. If c3 - c1 == ShAmt, we can emit SRLIW + SHXADD.
485 Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
486 (Trailing - C2.getLimitedValue()) == ShAmt;
487
488 if (Cond) {
489 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
490 return {{[=](MachineInstrBuilder &MIB) {
491 MachineIRBuilder(*MIB.getInstr())
492 .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
493 .addImm(Trailing);
494 MIB.addReg(DstReg);
495 }}};
496 }
497 }
498
499 return std::nullopt;
500}
501
502InstructionSelector::ComplexRendererFns
503RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
504 unsigned ShAmt) const {
505 using namespace llvm::MIPatternMatch;
506
507 if (!Root.isReg())
508 return std::nullopt;
509 Register RootReg = Root.getReg();
510
511 // Given (and (shl x, c2), mask) in which mask is a shifted mask with
512 // 32 - ShAmt leading zeros and c2 trailing zeros. We can use SLLI by
513 // c2 - ShAmt followed by SHXADD_UW with ShAmt for x amount.
514 APInt Mask, C2;
515 Register RegX;
516 if (mi_match(
517 RootReg, *MRI,
519 m_ICst(Mask))))) {
521
522 if (Mask.isShiftedMask()) {
523 unsigned Leading = Mask.countl_zero();
524 unsigned Trailing = Mask.countr_zero();
525 if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
526 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
527 return {{[=](MachineInstrBuilder &MIB) {
528 MachineIRBuilder(*MIB.getInstr())
529 .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
530 .addImm(C2.getLimitedValue() - ShAmt);
531 MIB.addReg(DstReg);
532 }}};
533 }
534 }
535 }
536
537 return std::nullopt;
538}
539
540InstructionSelector::ComplexRendererFns
541RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
542 assert(Root.isReg() && "Expected operand to be a Register");
543 MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
544
545 if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
546 auto C = RootDef->getOperand(1).getCImm();
547 if (C->getValue().isAllOnes())
548 // If the operand is a G_CONSTANT with value of all ones it is larger than
549 // VLMAX. We convert it to an immediate with value VLMaxSentinel. This is
550 // recognized specially by the vsetvli insertion pass.
551 return {{[=](MachineInstrBuilder &MIB) {
552 MIB.addImm(RISCV::VLMaxSentinel);
553 }}};
554
555 if (isUInt<5>(C->getZExtValue())) {
556 uint64_t ZExtC = C->getZExtValue();
557 return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
558 }
559 }
560 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
561}
562
563InstructionSelector::ComplexRendererFns
564RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
565 if (!Root.isReg())
566 return std::nullopt;
567
568 MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
569 if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
570 return {{
571 [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
572 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
573 }};
574 }
575
576 if (isBaseWithConstantOffset(Root, *MRI)) {
577 MachineOperand &LHS = RootDef->getOperand(1);
578 MachineOperand &RHS = RootDef->getOperand(2);
579 MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
580 MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
581
582 int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
583 if (isInt<12>(RHSC)) {
584 if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
585 return {{
586 [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
587 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
588 }};
589
590 return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
591 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
592 }
593 }
594
595 // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
596 // the combiner?
597 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
598 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
599}
600
601/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate CC.
602/// CC Must be an ICMP Predicate.
603static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
604 switch (CC) {
605 default:
606 llvm_unreachable("Expected ICMP CmpInst::Predicate.");
607 case CmpInst::Predicate::ICMP_EQ:
608 return RISCVCC::COND_EQ;
609 case CmpInst::Predicate::ICMP_NE:
610 return RISCVCC::COND_NE;
611 case CmpInst::Predicate::ICMP_ULT:
612 return RISCVCC::COND_LTU;
613 case CmpInst::Predicate::ICMP_SLT:
614 return RISCVCC::COND_LT;
615 case CmpInst::Predicate::ICMP_UGE:
616 return RISCVCC::COND_GEU;
617 case CmpInst::Predicate::ICMP_SGE:
618 return RISCVCC::COND_GE;
619 }
620}
621
625 // Try to fold an ICmp. If that fails, use a NE compare with X0.
627 if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
628 LHS = CondReg;
629 RHS = RISCV::X0;
630 CC = RISCVCC::COND_NE;
631 return;
632 }
633
634 // We found an ICmp, do some canonicalization.
635
636 // Adjust comparisons to use comparison with 0 if possible.
638 switch (Pred) {
640 // Convert X > -1 to X >= 0
641 if (*Constant == -1) {
642 CC = RISCVCC::COND_GE;
643 RHS = RISCV::X0;
644 return;
645 }
646 break;
648 // Convert X < 1 to 0 >= X
649 if (*Constant == 1) {
650 CC = RISCVCC::COND_GE;
651 RHS = LHS;
652 LHS = RISCV::X0;
653 return;
654 }
655 break;
656 default:
657 break;
658 }
659 }
660
661 switch (Pred) {
662 default:
663 llvm_unreachable("Expected ICMP CmpInst::Predicate.");
670 // These CCs are supported directly by RISC-V branches.
671 break;
676 // These CCs are not supported directly by RISC-V branches, but changing the
677 // direction of the CC and swapping LHS and RHS are.
678 Pred = CmpInst::getSwappedPredicate(Pred);
679 std::swap(LHS, RHS);
680 break;
681 }
682
683 CC = getRISCVCCFromICmp(Pred);
684}
685
686/// Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation
687/// \p GenericOpc, appropriate for the GPR register bank and of memory access
688/// size \p OpSize.
689static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
690 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
691 switch (OpSize) {
692 default:
693 llvm_unreachable("Unexpected memory size");
694 case 8:
695 return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
696 case 16:
697 return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
698 case 32:
699 return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
700 case 64:
701 return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
702 }
703}
704
705/// Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation
706/// \p GenericOpc, appropriate for the GPR register bank and of memory access
707/// size \p OpSize. \returns \p GenericOpc if the combination is unsupported.
708static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
709 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
710 switch (OpSize) {
711 case 8:
712 // Prefer unsigned due to no c.lb in Zcb.
713 return IsStore ? RISCV::SB : RISCV::LBU;
714 case 16:
715 return IsStore ? RISCV::SH : RISCV::LH;
716 case 32:
717 return IsStore ? RISCV::SW : RISCV::LW;
718 case 64:
719 return IsStore ? RISCV::SD : RISCV::LD;
720 }
721
722 return GenericOpc;
723}
724
725void RISCVInstructionSelector::addVectorLoadStoreOperands(
726 MachineInstr &I, SmallVectorImpl<Register> &SrcOps, unsigned &CurOp,
727 bool IsMasked, bool IsStridedOrIndexed, LLT *IndexVT) const {
728 // Base Pointer
729 auto PtrReg = I.getOperand(CurOp++).getReg();
730 SrcOps.push_back(PtrReg);
731
732 // Stride or Index
733 if (IsStridedOrIndexed) {
734 auto StrideReg = I.getOperand(CurOp++).getReg();
735 SrcOps.push_back(StrideReg);
736 if (IndexVT)
737 *IndexVT = MRI->getType(StrideReg);
738 }
739
740 // Mask
741 if (IsMasked) {
742 auto MaskReg = I.getOperand(CurOp++).getReg();
743 SrcOps.push_back(MaskReg);
744 }
745}
746
747bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
748 MachineInstr &I) const {
749 // Find the intrinsic ID.
750 unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
751 // Select the instruction.
752 switch (IntrinID) {
753 default:
754 return false;
755 case Intrinsic::riscv_vlm:
756 case Intrinsic::riscv_vle:
757 case Intrinsic::riscv_vle_mask:
758 case Intrinsic::riscv_vlse:
759 case Intrinsic::riscv_vlse_mask: {
760 bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||
761 IntrinID == Intrinsic::riscv_vlse_mask;
762 bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||
763 IntrinID == Intrinsic::riscv_vlse_mask;
764 LLT VT = MRI->getType(I.getOperand(0).getReg());
765 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
766
767 // Result vector
768 const Register DstReg = I.getOperand(0).getReg();
769
770 // Sources
771 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
772 unsigned CurOp = 2;
773 SmallVector<Register, 4> SrcOps; // Source registers.
774
775 // Passthru
776 if (HasPassthruOperand) {
777 auto PassthruReg = I.getOperand(CurOp++).getReg();
778 SrcOps.push_back(PassthruReg);
779 } else {
780 SrcOps.push_back(Register(RISCV::NoRegister));
781 }
782
783 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);
784
786 const RISCV::VLEPseudo *P =
787 RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
788 static_cast<unsigned>(LMUL));
789
790 MachineInstrBuilder PseudoMI =
791 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(P->Pseudo), DstReg);
792 for (Register Reg : SrcOps)
793 PseudoMI.addReg(Reg);
794
795 // Select VL
796 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
797 for (auto &RenderFn : *VLOpFn)
798 RenderFn(PseudoMI);
799
800 // SEW
801 PseudoMI.addImm(Log2SEW);
802
803 // Policy
804 uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
805 if (IsMasked)
806 Policy = I.getOperand(CurOp++).getImm();
807 PseudoMI.addImm(Policy);
808
809 // Memref
810 PseudoMI.cloneMemRefs(I);
811
812 I.eraseFromParent();
813 constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
814 return true;
815 }
816 case Intrinsic::riscv_vloxei:
817 case Intrinsic::riscv_vloxei_mask:
818 case Intrinsic::riscv_vluxei:
819 case Intrinsic::riscv_vluxei_mask: {
820 bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask ||
821 IntrinID == Intrinsic::riscv_vluxei_mask;
822 bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei ||
823 IntrinID == Intrinsic::riscv_vloxei_mask;
824 LLT VT = MRI->getType(I.getOperand(0).getReg());
825 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
826
827 // Result vector
828 const Register DstReg = I.getOperand(0).getReg();
829
830 // Sources
831 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
832 unsigned CurOp = 2;
833 SmallVector<Register, 4> SrcOps; // Source registers.
834
835 // Passthru
836 if (HasPassthruOperand) {
837 auto PassthruReg = I.getOperand(CurOp++).getReg();
838 SrcOps.push_back(PassthruReg);
839 } else {
840 // Use NoRegister if there is no specified passthru.
841 SrcOps.push_back(Register());
842 }
843 LLT IndexVT;
844 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);
845
847 RISCVVType::VLMUL IndexLMUL =
849 unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
850 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
851 reportFatalUsageError("The V extension does not support EEW=64 for index "
852 "values when XLEN=32");
853 }
854 const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
855 IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
856 static_cast<unsigned>(IndexLMUL));
857
858 MachineInstrBuilder PseudoMI =
859 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(P->Pseudo), DstReg);
860 for (Register Reg : SrcOps)
861 PseudoMI.addReg(Reg);
862
863 // Select VL
864 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
865 for (auto &RenderFn : *VLOpFn)
866 RenderFn(PseudoMI);
867
868 // SEW
869 PseudoMI.addImm(Log2SEW);
870
871 // Policy
872 uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
873 if (IsMasked)
874 Policy = I.getOperand(CurOp++).getImm();
875 PseudoMI.addImm(Policy);
876
877 // Memref
878 PseudoMI.cloneMemRefs(I);
879
880 I.eraseFromParent();
881 constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
882 return true;
883 }
884 case Intrinsic::riscv_vsm:
885 case Intrinsic::riscv_vse:
886 case Intrinsic::riscv_vse_mask:
887 case Intrinsic::riscv_vsse:
888 case Intrinsic::riscv_vsse_mask: {
889 bool IsMasked = IntrinID == Intrinsic::riscv_vse_mask ||
890 IntrinID == Intrinsic::riscv_vsse_mask;
891 bool IsStrided = IntrinID == Intrinsic::riscv_vsse ||
892 IntrinID == Intrinsic::riscv_vsse_mask;
893 LLT VT = MRI->getType(I.getOperand(1).getReg());
894 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
895
896 // Sources
897 unsigned CurOp = 1;
898 SmallVector<Register, 4> SrcOps; // Source registers.
899
900 // Store value
901 auto PassthruReg = I.getOperand(CurOp++).getReg();
902 SrcOps.push_back(PassthruReg);
903
904 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);
905
907 const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
908 IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
909
910 MachineInstrBuilder PseudoMI =
911 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(P->Pseudo));
912 for (Register Reg : SrcOps)
913 PseudoMI.addReg(Reg);
914
915 // Select VL
916 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
917 for (auto &RenderFn : *VLOpFn)
918 RenderFn(PseudoMI);
919
920 // SEW
921 PseudoMI.addImm(Log2SEW);
922
923 // Memref
924 PseudoMI.cloneMemRefs(I);
925
926 I.eraseFromParent();
927 constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
928 return true;
929 }
930 case Intrinsic::riscv_vsoxei:
931 case Intrinsic::riscv_vsoxei_mask:
932 case Intrinsic::riscv_vsuxei:
933 case Intrinsic::riscv_vsuxei_mask: {
934 bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask ||
935 IntrinID == Intrinsic::riscv_vsuxei_mask;
936 bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei ||
937 IntrinID == Intrinsic::riscv_vsoxei_mask;
938 LLT VT = MRI->getType(I.getOperand(1).getReg());
939 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
940
941 // Sources
942 unsigned CurOp = 1;
943 SmallVector<Register, 4> SrcOps; // Source registers.
944
945 // Store value
946 auto PassthruReg = I.getOperand(CurOp++).getReg();
947 SrcOps.push_back(PassthruReg);
948
949 LLT IndexVT;
950 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);
951
953 RISCVVType::VLMUL IndexLMUL =
955 unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
956 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
957 reportFatalUsageError("The V extension does not support EEW=64 for index "
958 "values when XLEN=32");
959 }
960 const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
961 IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
962 static_cast<unsigned>(IndexLMUL));
963
964 MachineInstrBuilder PseudoMI =
965 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(P->Pseudo));
966 for (Register Reg : SrcOps)
967 PseudoMI.addReg(Reg);
968
969 // Select VL
970 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
971 for (auto &RenderFn : *VLOpFn)
972 RenderFn(PseudoMI);
973
974 // SEW
975 PseudoMI.addImm(Log2SEW);
976
977 // Memref
978 PseudoMI.cloneMemRefs(I);
979
980 I.eraseFromParent();
981 constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
982 return true;
983 }
984 }
985}
986
/// Select a plain (no-side-effect) G_INTRINSIC. Currently only handles the
/// riscv_vsetvli / riscv_vsetvlimax intrinsics, lowering them to
/// PseudoVSETVLI, PseudoVSETVLIX0 (AVL == VLMAX) or PseudoVSETIVLI
/// (small constant AVL). Returns false for any other intrinsic so that the
/// caller can fall back to other selection paths.
bool RISCVInstructionSelector::selectIntrinsic(MachineInstr &I) const {
  // Find the intrinsic ID.
  unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
  // Select the instruction.
  switch (IntrinID) {
  default:
    return false;
  case Intrinsic::riscv_vsetvli:
  case Intrinsic::riscv_vsetvlimax: {

    bool VLMax = IntrinID == Intrinsic::riscv_vsetvlimax;

    // Operand layout is (dst, intrinsic-id, [avl,] sew, vlmul); vsetvlimax
    // carries no AVL operand, so its SEW immediate sits one slot earlier.
    unsigned Offset = VLMax ? 2 : 3;
    unsigned SEW = RISCVVType::decodeVSEW(I.getOperand(Offset).getImm() & 0x7);
    RISCVVType::VLMUL VLMul =
        static_cast<RISCVVType::VLMUL>(I.getOperand(Offset + 1).getImm() & 0x7);

    // Encode vtype with the tail-agnostic and mask-agnostic policy bits set.
    unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
                                              /*MaskAgnostic*/ true);

    Register DstReg = I.getOperand(0).getReg();

    Register VLOperand;
    unsigned Opcode = RISCV::PseudoVSETVLI;

    // Check if AVL is a constant that equals VLMAX.
    if (!VLMax) {
      Register AVLReg = I.getOperand(2).getReg();
      // When the exact VLEN is known, a constant AVL equal to
      // VLEN / (SEW-LMUL ratio) is exactly VLMAX.
      if (auto AVLConst = getIConstantVRegValWithLookThrough(AVLReg, *MRI)) {
        uint64_t AVL = AVLConst->Value.getZExtValue();
        if (auto VLEN = Subtarget->getRealVLen()) {
          if (*VLEN / RISCVVType::getSEWLMULRatio(SEW, VLMul) == AVL)
            VLMax = true;
        }
      }

      // An all-ones G_CONSTANT AVL also requests VLMAX.
      MachineInstr *AVLDef = MRI->getVRegDef(AVLReg);
      if (AVLDef && AVLDef->getOpcode() == TargetOpcode::G_CONSTANT) {
        const auto *C = AVLDef->getOperand(1).getCImm();
        if (C->getValue().isAllOnes())
          VLMax = true;
      }
    }

    if (VLMax) {
      // X0 as the AVL operand selects the VLMAX form.
      VLOperand = Register(RISCV::X0);
      Opcode = RISCV::PseudoVSETVLIX0;
    } else {
      Register AVLReg = I.getOperand(2).getReg();
      VLOperand = AVLReg;

      // Check if AVL is a small constant that can use PseudoVSETIVLI.
      if (auto AVLConst = getIConstantVRegValWithLookThrough(AVLReg, *MRI)) {
        uint64_t AVL = AVLConst->Value.getZExtValue();
        // PseudoVSETIVLI takes the AVL as a 5-bit unsigned immediate.
        if (isUInt<5>(AVL)) {
          MachineInstr *PseudoMI =
              BuildMI(*I.getParent(), I, I.getDebugLoc(),
                      TII.get(RISCV::PseudoVSETIVLI), DstReg)
                  .addImm(AVL)
                  .addImm(VTypeI);
          I.eraseFromParent();
          constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
          return true;
        }
      }
    }

    // Register-AVL form (PseudoVSETVLI or PseudoVSETVLIX0).
    MachineInstr *PseudoMI =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
            .addReg(VLOperand)
            .addImm(VTypeI);
    I.eraseFromParent();
    constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
    return true;
  }
  }
}
1064
/// Select G_EXTRACT_SUBVECTOR as a plain subregister COPY when the extract
/// decomposes to a pure subregister index (no residual element offset).
/// Returns false otherwise so other selection paths can handle it.
bool RISCVInstructionSelector::selectExtractSubvector(MachineInstr &MI) const {
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);

  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();

  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);

  // Constant extraction index (operand 2 of G_EXTRACT_SUBVECTOR).
  unsigned Idx = static_cast<unsigned>(MI.getOperand(2).getImm());

  MVT DstMVT = getMVTForLLT(DstTy);
  MVT SrcMVT = getMVTForLLT(SrcTy);

  // Decompose the extract into a subregister index plus a remaining element
  // offset (Idx is updated in place).
  // NOTE(review): the callee on the next statement appears truncated in this
  // copy of the file; upstream calls
  // RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs here —
  // confirm against the repository.
  unsigned SubRegIdx;
  std::tie(SubRegIdx, Idx) =
      SrcMVT, DstMVT, Idx, &TRI);

  // Only a pure subregister extraction (no leftover element offset) can be
  // expressed as a COPY.
  if (Idx != 0)
    return false;

  // Constrain both registers to the vector register classes implied by
  // their MVTs.
  unsigned DstRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(DstMVT);
  const TargetRegisterClass *DstRC = TRI.getRegClass(DstRegClassID);
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  unsigned SrcRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(SrcMVT);
  const TargetRegisterClass *SrcRC = TRI.getRegClass(SrcRegClassID);
  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
    return false;

  // Emit the extraction as a subregister read of SrcReg.
  BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII.get(TargetOpcode::COPY),
          DstReg)
      .addReg(SrcReg, {}, SubRegIdx);

  MI.eraseFromParent();
  return true;
}
1104
/// Main InstructionSelector entry point: select one generic MachineInstr into
/// target instructions. First runs preISelLower to rewrite pointer ops into
/// integer ops, then handles PHIs/copies, then tries the TableGen-imported
/// patterns (selectImpl), and finally falls back to the manual cases below.
/// NOTE(review): several single lines appear truncated in this extraction of
/// the file (e.g. the initializer of DefRC in the PHI path, the
/// `RISCVCC::CondCode CC;` declaration in the G_BRCOND case, and a few
/// constrainSelectedInstRegOperands calls after in-place rewrites) — verify
/// against the upstream repository before relying on exact statements here.
bool RISCVInstructionSelector::select(MachineInstr &MI) {
  preISelLower(MI);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI->getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI->getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
      if (!DefRC) {
        if (!DefTy.isValid()) {
          LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
          return false;
        }

        // No register class yet: derive one from the PHI def's bank + LLT.
        const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
        if (!DefRC) {
          LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
          return false;
        }
      }

      // Demote G_PHI to a target PHI and pin the def to the chosen class.
      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
    }

    // Certain non-generic instructions also need some special handling.
    if (MI.isCopy())
      return selectCopy(MI);

    return true;
  }

  // Try the TableGen-imported patterns first.
  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FREEZE:
    // These are all no-ops at the register level; select them as copies.
    return selectCopy(MI);
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT: {
    bool IsSigned = Opc != TargetOpcode::G_ZEXT;
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();
    LLT SrcTy = MRI->getType(SrcReg);
    unsigned SrcSize = SrcTy.getSizeInBits();

    if (SrcTy.isVector())
      return false; // Should be handled by imported patterns.

    assert((*RBI.getRegBank(DstReg, *MRI, TRI)).getID() ==
               RISCV::GPRBRegBankID &&
           "Unexpected ext regbank");

    // Use addiw SrcReg, 0 (sext.w) for i32.
    if (IsSigned && SrcSize == 32) {
      MI.setDesc(TII.get(RISCV::ADDIW));
      MI.addOperand(MachineOperand::CreateImm(0));
      return true;
    }

    // Use add.uw SrcReg, X0 (zext.w) for i32 with Zba.
    if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {
      MI.setDesc(TII.get(RISCV::ADD_UW));
      MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
      return true;
    }

    // Use sext.h/zext.h for i16 with Zbb.
    if (SrcSize == 16 &&
        (STI.hasStdExtZbb() || (!IsSigned && STI.hasStdExtZbkb()))) {
      MI.setDesc(TII.get(IsSigned ? RISCV::SEXT_H
                         : STI.isRV64() ? RISCV::ZEXT_H_RV64
                                        : RISCV::ZEXT_H_RV32));
      return true;
    }

    // Fall back to shift pair: shift left to put the source value's top bit
    // at the MSB, then arithmetic (sext) or logical (zext) shift right.
    Register ShiftLeftReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *ShiftLeft = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                      TII.get(RISCV::SLLI), ShiftLeftReg)
                                  .addReg(SrcReg)
                                  .addImm(STI.getXLen() - SrcSize);
    constrainSelectedInstRegOperands(*ShiftLeft, TII, TRI, RBI);
    MachineInstr *ShiftRight =
        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                TII.get(IsSigned ? RISCV::SRAI : RISCV::SRLI), DstReg)
            .addReg(ShiftLeftReg)
            .addImm(STI.getXLen() - SrcSize);
    constrainSelectedInstRegOperands(*ShiftRight, TII, TRI, RBI);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // TODO: Use constant pool for complex constants.
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      // The constant fits in one GPR: materialize the bit pattern there and
      // move it into the FPR with the width-appropriate fmv.
      Register GPRReg;
      if (FPimm.isPosZero()) {
        GPRReg = RISCV::X0;
      } else {
        GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        APInt Imm = FPimm.bitcastToAPInt();
        if (!materializeImm(GPRReg, Imm.getSExtValue(), MI))
          return false;
      }

      unsigned Opcode = Size == 64 ? RISCV::FMV_D_X
                        : Size == 32 ? RISCV::FMV_W_X
                                     : RISCV::FMV_H_X;
      MachineInstr *FMV = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                  TII.get(Opcode), DstReg)
                              .addReg(GPRReg);
    } else {
      // s64 on rv32
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");

      if (FPimm.isPosZero()) {
        // Optimize +0.0 to use fcvt.d.w
        MachineInstr *FCVT = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                     TII.get(RISCV::FCVT_D_W), DstReg)
                                 .addReg(RISCV::X0)

        MI.eraseFromParent();
        return true;
      }

      // Split into two pieces and build through the stack.
      Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      APInt Imm = FPimm.bitcastToAPInt();
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MI))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MI))
        return false;
      MachineInstr *PairF64 =
          BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                  TII.get(RISCV::BuildPairF64Pseudo), DstReg)
              .addReg(GPRRegLow)
              .addReg(GPRRegHigh);
      constrainSelectedInstRegOperands(*PairF64, TII, TRI, RBI);
    }

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI);
  case TargetOpcode::G_BRCOND: {
    // Lower the condition into (CC, LHS, RHS) and emit the matching branch.
    Register LHS, RHS;
    getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);

    MachineInstr *Bcc = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                TII.get(RISCVCC::getBrCond(CC)))
                            .addReg(LHS)
                            .addReg(RHS)
                            .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    return true;
  case TargetOpcode::G_SELECT:
    return selectSelect(MI);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MI);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    GLoadStore &LdSt = cast<GLoadStore>(MI);
    const Register ValReg = LdSt.getReg(0);
    const Register PtrReg = LdSt.getPointerReg();
    LLT PtrTy = MRI->getType(PtrReg);

    // Only GPR-bank values are handled here.
    const RegisterBank &RB = *RBI.getRegBank(ValReg, *MRI, TRI);
    if (RB.getID() != RISCV::GPRBRegBankID)
      return false;

#ifndef NDEBUG
    const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, *MRI, TRI);
    // Check that the pointer register is valid.
    assert(PtrRB.getID() == RISCV::GPRBRegBankID &&
           "Load/Store pointer operand isn't a GPR");
    assert(PtrTy.isPointer() && "Load/Store pointer operand isn't a pointer");
#endif

    // Can only handle AddressSpace 0.
    if (PtrTy.getAddressSpace() != 0)
      return false;

    unsigned MemSize = LdSt.getMemSizeInBits().getValue();
    AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering();

    // Atomic orderings stronger than monotonic use the Zalasr forms.
    if (isStrongerThanMonotonic(Order)) {
      MI.setDesc(TII.get(selectZalasrLoadStoreOp(Opc, MemSize)));
      return true;
    }

    const unsigned NewOpc = selectRegImmLoadStoreOp(MI.getOpcode(), MemSize);
    if (NewOpc == MI.getOpcode())
      return false;

    // Check if we can fold anything into the addressing mode.
    auto AddrModeFns = selectAddrRegImm(MI.getOperand(1));
    if (!AddrModeFns)
      return false;

    // Folded something. Create a new instruction and return it.
    MachineInstrBuilder NewInst =
        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII.get(NewOpc));
    NewInst.setMIFlags(MI.getFlags());
    if (isa<GStore>(MI))
      NewInst.addUse(ValReg);
    else
      NewInst.addDef(ValReg);
    NewInst.cloneMemRefs(MI);
    for (auto &Fn : *AddrModeFns)
      Fn(NewInst);
    MI.eraseFromParent();

    constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI);
    return true;
  }
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWithSideEffects(MI);
  case TargetOpcode::G_INTRINSIC:
    return selectIntrinsic(MI);
  case TargetOpcode::G_EXTRACT_SUBVECTOR:
    return selectExtractSubvector(MI);
  default:
    return false;
  }
}
1396
1397bool RISCVInstructionSelector::selectUnmergeValues(MachineInstr &MI) const {
1398 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
1399
1400 if (!Subtarget->hasStdExtZfa())
1401 return false;
1402
1403 // Split F64 Src into two s32 parts
1404 if (MI.getNumOperands() != 3)
1405 return false;
1406 Register Src = MI.getOperand(2).getReg();
1407 Register Lo = MI.getOperand(0).getReg();
1408 Register Hi = MI.getOperand(1).getReg();
1409 if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
1410 return false;
1411
1412 MachineInstr *ExtractLo = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1413 TII.get(RISCV::FMV_X_W_FPR64), Lo)
1414 .addReg(Src);
1415 constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI);
1416
1417 MachineInstr *ExtractHi = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1418 TII.get(RISCV::FMVH_X_D), Hi)
1419 .addReg(Src);
1420 constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI);
1421
1422 MI.eraseFromParent();
1423 return true;
1424}
1425
1426bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op) {
1427 Register PtrReg = Op.getReg();
1428 assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");
1429
1430 const LLT sXLen = LLT::scalar(STI.getXLen());
1431 MachineInstr &ParentMI = *Op.getParent();
1432 Register IntReg = MRI->createGenericVirtualRegister(sXLen);
1433 MRI->setRegBank(IntReg, RBI.getRegBank(RISCV::GPRBRegBankID));
1434 MachineInstr *PtrToInt =
1435 BuildMI(*ParentMI.getParent(), ParentMI, ParentMI.getDebugLoc(),
1436 TII.get(TargetOpcode::G_PTRTOINT), IntReg)
1437 .addReg(PtrReg);
1438 Op.setReg(IntReg);
1439 return select(*PtrToInt);
1440}
1441
1442void RISCVInstructionSelector::preISelLower(MachineInstr &MI) {
1443 switch (MI.getOpcode()) {
1444 case TargetOpcode::G_PTR_ADD: {
1445 Register DstReg = MI.getOperand(0).getReg();
1446 const LLT sXLen = LLT::scalar(STI.getXLen());
1447
1448 replacePtrWithInt(MI.getOperand(1));
1449 MI.setDesc(TII.get(TargetOpcode::G_ADD));
1450 MRI->setType(DstReg, sXLen);
1451 break;
1452 }
1453 case TargetOpcode::G_PTRMASK: {
1454 Register DstReg = MI.getOperand(0).getReg();
1455 const LLT sXLen = LLT::scalar(STI.getXLen());
1456 replacePtrWithInt(MI.getOperand(1));
1457 MI.setDesc(TII.get(TargetOpcode::G_AND));
1458 MRI->setType(DstReg, sXLen);
1459 break;
1460 }
1461 }
1462}
1463
1464void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
1465 const MachineInstr &MI,
1466 int OpIdx) const {
1467 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1468 "Expected G_CONSTANT");
1469 int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
1470 MIB.addImm(-CstVal);
1471}
1472
1473void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
1474 const MachineInstr &MI,
1475 int OpIdx) const {
1476 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1477 "Expected G_CONSTANT");
1478 uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
1479 MIB.addImm(STI.getXLen() - CstVal);
1480}
1481
1482void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
1483 const MachineInstr &MI,
1484 int OpIdx) const {
1485 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1486 "Expected G_CONSTANT");
1487 uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
1488 MIB.addImm(32 - CstVal);
1489}
1490
1491void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
1492 const MachineInstr &MI,
1493 int OpIdx) const {
1494 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1495 "Expected G_CONSTANT");
1496 int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
1497 MIB.addImm(CstVal + 1);
1498}
1499
1500void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
1501 const MachineInstr &MI,
1502 int OpIdx) const {
1503 assert(MI.getOpcode() == TargetOpcode::G_FRAME_INDEX && OpIdx == -1 &&
1504 "Expected G_FRAME_INDEX");
1505 MIB.add(MI.getOperand(1));
1506}
1507
/// ComplexRenderer: render the number of trailing zero bits of a G_CONSTANT.
void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  // NOTE(review): the statement that consumes C appears to be missing in this
  // copy of the file; upstream emits MIB.addImm(llvm::countr_zero(C)) here —
  // confirm against the repository.
}
1516
1517void RISCVInstructionSelector::renderXLenSubTrailingOnes(
1518 MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
1519 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1520 "Expected G_CONSTANT");
1521 uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
1522 MIB.addImm(Subtarget->getXLen() - llvm::countr_one(C));
1523}
1524
1525void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
1526 const MachineInstr &MI,
1527 int OpIdx) const {
1528 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1529 "Expected G_CONSTANT");
1530 int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
1531 int64_t Adj = Imm < 0 ? -2048 : 2047;
1532 MIB.addImm(Imm - Adj);
1533}
1534
1535void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
1536 const MachineInstr &MI,
1537 int OpIdx) const {
1538 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1539 "Expected G_CONSTANT");
1540 int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
1541 MIB.addImm(Imm);
1542}
1543
1544const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
1545 LLT Ty, const RegisterBank &RB) const {
1546 if (RB.getID() == RISCV::GPRBRegBankID) {
1547 if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
1548 return &RISCV::GPRRegClass;
1549 }
1550
1551 if (RB.getID() == RISCV::FPRBRegBankID) {
1552 if (Ty.getSizeInBits() == 16)
1553 return &RISCV::FPR16RegClass;
1554 if (Ty.getSizeInBits() == 32)
1555 return &RISCV::FPR32RegClass;
1556 if (Ty.getSizeInBits() == 64)
1557 return &RISCV::FPR64RegClass;
1558 }
1559
1560 if (RB.getID() == RISCV::VRBRegBankID) {
1561 if (Ty.getSizeInBits().getKnownMinValue() <= 64)
1562 return &RISCV::VRRegClass;
1563
1564 if (Ty.getSizeInBits().getKnownMinValue() == 128)
1565 return &RISCV::VRM2RegClass;
1566
1567 if (Ty.getSizeInBits().getKnownMinValue() == 256)
1568 return &RISCV::VRM4RegClass;
1569
1570 if (Ty.getSizeInBits().getKnownMinValue() == 512)
1571 return &RISCV::VRM8RegClass;
1572 }
1573
1574 return nullptr;
1575}
1576
1577bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
1578 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
1579}
1580
1581bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
1582 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
1583}
1584
1585bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
1586 Register DstReg = MI.getOperand(0).getReg();
1587
1588 if (DstReg.isPhysical())
1589 return true;
1590
1591 const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
1592 MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
1593 assert(DstRC &&
1594 "Register class not available for LLT, register bank combination");
1595
1596 // No need to constrain SrcReg. It will get constrained when
1597 // we hit another of its uses or its defs.
1598 // Copies do not have constraints.
1599 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1600 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
1601 << " operand\n");
1602 return false;
1603 }
1604
1605 MI.setDesc(TII.get(RISCV::COPY));
1606 return true;
1607}
1608
1609bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI) const {
1610 assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
1611
1612 const Register DstReg = MI.getOperand(0).getReg();
1613 const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
1614 MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
1615
1616 assert(DstRC &&
1617 "Register class not available for LLT, register bank combination");
1618
1619 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1620 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
1621 << " operand\n");
1622 }
1623 MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
1624 return true;
1625}
1626
/// Materialize the 64-bit immediate Imm into DstReg, inserting instructions
/// before MI. Zero becomes a COPY from X0; anything else is expanded into a
/// RISCVMatInt instruction sequence threaded through temporary GPRs, with the
/// final instruction writing DstReg. Returns true on success.
/// NOTE(review): a few lines appear truncated in this copy of the file — the
/// initializer of `Seq` (upstream: RISCVMatInt::generateInstSeq), the
/// `case RISCVMatInt::RegReg:` / `case RISCVMatInt::RegImm:` labels, and a
/// constrainSelectedInstRegOperands call on `Result` at the loop bottom.
/// Verify against the repository.
bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  // Zero is just a copy of the hard-wired zero register.
  if (Imm == 0) {
    BuildMI(MBB, MI, DL, TII.get(TargetOpcode::COPY), DstReg).addReg(RISCV::X0);
    RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, *MRI);
    return true;
  }

  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  // Thread the partial result through fresh temporaries; the last step
  // writes directly into DstReg.
  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    MachineInstr *Result;

    // Each sequence step consumes the previous partial value (SrcReg) in the
    // shape its operand kind dictates.
    switch (I.getOpndKind()) {
    case RISCVMatInt::Imm:
      // clang-format off
      Result = BuildMI(MBB, MI, DL, TII.get(I.getOpcode()), TmpReg)
                   .addImm(I.getImm());
      // clang-format on
      break;
    case RISCVMatInt::RegX0:
      Result = BuildMI(MBB, MI, DL, TII.get(I.getOpcode()), TmpReg)
                   .addReg(SrcReg)
                   .addReg(RISCV::X0);
      break;
      Result = BuildMI(MBB, MI, DL, TII.get(I.getOpcode()), TmpReg)
                   .addReg(SrcReg)
                   .addReg(SrcReg);
      break;
      Result = BuildMI(MBB, MI, DL, TII.get(I.getOpcode()), TmpReg)
                   .addReg(SrcReg)
                   .addImm(I.getImm());
      break;
    }


    SrcReg = TmpReg;
  }

  return true;
}
1680
/// Select an address-producing instruction (G_GLOBAL_VALUE, G_JUMP_TABLE or
/// G_CONSTANT_POOL). Chooses between PC-relative (PseudoLLA), GOT-indirect
/// (PseudoLGA) and absolute lui/addi sequences based on PIC mode, tagged
/// globals, linkage (IsLocal/IsExternWeak) and the code model.
/// NOTE(review): several lines appear truncated in this copy of the file —
/// the MachineMemOperand flag arguments in both getMachineMemOperand calls,
/// the constrainSelectedInstRegOperands calls after each built instruction,
/// and the start of the failure-reporting call in the default code-model
/// case. Verify against the repository.
bool RISCVInstructionSelector::selectAddr(MachineInstr &MI, bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  // Operand 1 holds the symbol (global/jump-table/constant-pool) to address.
  const MachineOperand &DispMO = MI.getOperand(1);

  Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);

  // When HWASAN is used and tagging of global variables is enabled
  // they should be accessed via the GOT, since the tagged address of a global
  // is incompatible with existing code models. This also applies to non-pic
  // mode.
  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return true;
    }

    // Use PC-relative addressing to access the GOT for this symbol, then
    // load the address from the GOT. This generates the pattern (PseudoLGA
    // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
    // %pcrel_lo(auipc))).
    MachineFunction &MF = *MI.getParent()->getParent();
    MachineMemOperand *MemOp = MF.getMachineMemOperand(
        DefTy, Align(DefTy.getSizeInBits() / 8));

    MachineInstr *Result = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                   TII.get(RISCV::PseudoLGA), DefReg)
                               .addDisp(DispMO, 0)
                               .addMemOperand(MemOp);

    MI.eraseFromParent();
    return true;
  }

  switch (TM.getCodeModel()) {
  default: {
        "Unsupported code model for lowering", MI);
    return false;
  }
  case CodeModel::Small: {
    // Must lie within a single 2 GiB address range and must lie between
    // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
    // (lui %hi(sym)) %lo(sym)).
    Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                   TII.get(RISCV::LUI), AddrHiDest)
                               .addDisp(DispMO, 0, RISCVII::MO_HI);


    MachineInstr *Result = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                   TII.get(RISCV::ADDI), DefReg)
                               .addReg(AddrHiDest)
                               .addDisp(DispMO, 0, RISCVII::MO_LO);


    MI.eraseFromParent();
    return true;
  }
  case CodeModel::Medium:
    // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
    // relocation needs to reference a label that points to the auipc
    // instruction itself, not the global. This cannot be done inside the
    // instruction selector.
    if (IsExternWeak) {
      // An extern weak symbol may be undefined, i.e. have value 0, which may
      // not be within 2GiB of PC, so use GOT-indirect addressing to access the
      // symbol. This generates the pattern (PseudoLGA sym), which expands to
      // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
      MachineFunction &MF = *MI.getParent()->getParent();
      MachineMemOperand *MemOp = MF.getMachineMemOperand(
          DefTy, Align(DefTy.getSizeInBits() / 8));

      MachineInstr *Result = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                     TII.get(RISCV::PseudoLGA), DefReg)
                                 .addDisp(DispMO, 0)
                                 .addMemOperand(MemOp);

      MI.eraseFromParent();
      return true;
    }

    // Generate a sequence for accessing addresses within any 2GiB range
    // within the address space. This generates the pattern (PseudoLLA sym),
    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    MI.setDesc(TII.get(RISCV::PseudoLLA));
    return true;
  }

  return false;
}
1794
/// Select G_SELECT into a Select_*_Using_CC_GPR pseudo. The boolean
/// condition is decomposed into (CC, LHS, RHS) by getOperandsForBranch; the
/// pseudo opcode is picked from the destination's bank and size (GPR, or
/// FPR32/FPR64 for FP-bank results).
/// NOTE(review): the `RISCVCC::CondCode CC;` declaration and a trailing
/// constrainSelectedInstRegOperands call appear truncated in this copy of
/// the file — verify against the repository.
bool RISCVInstructionSelector::selectSelect(MachineInstr &MI) const {
  auto &SelectMI = cast<GSelect>(MI);

  Register LHS, RHS;
  getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);

  Register DstReg = SelectMI.getReg(0);

  // Default to the GPR form; switch to the FP form sized by the result.
  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }

  MachineInstr *Result =
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII.get(Opc))
          .addDef(DstReg)
          .addReg(LHS)
          .addReg(RHS)
          .addImm(CC)
          .addReg(SelectMI.getTrueReg())
          .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  return true;
}
1823
1824// Convert an FCMP predicate to one of the supported F or D instructions.
1825static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
1826 assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
1827 switch (Pred) {
1828 default:
1829 llvm_unreachable("Unsupported predicate");
1830 case CmpInst::FCMP_OLT:
1831 return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
1832 case CmpInst::FCMP_OLE:
1833 return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
1834 case CmpInst::FCMP_OEQ:
1835 return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
1836 }
1837}
1838
// Try legalizing an FCMP by swapping or inverting the predicate to one that
// is supported.
// The three attempts, in order: (1) swap operands; (2) invert the predicate
// (setting NeedInvert so the caller XORs the result); (3) swap the inverted
// predicate's operands. Returns true iff one of them yields a legal
// predicate, updating LHS/RHS/Pred/NeedInvert in place.
// NOTE(review): the first line of the signature
// (`static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,`) and
// the initializer of `InvPred` (upstream: CmpInst::getSwappedPredicate(Pred))
// appear truncated in this copy of the file — verify against the repository.
    CmpInst::Predicate &Pred, bool &NeedInvert) {
  // Only OLT/OLE/OEQ map directly onto FLT/FLE/FEQ.
  auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
           Pred == CmpInst::FCMP_OEQ;
  };

  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

  // Attempt 1: swapped predicate with swapped operands.
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  // Attempt 2: inverse predicate; the caller must invert the result.
  InvPred = CmpInst::getInversePredicate(Pred);
  NeedInvert = true;
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    return true;
  }
  // Attempt 3: inverse predicate with swapped operands.
  InvPred = CmpInst::getSwappedPredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  return false;
}
1872
// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
// the result in DstReg.
// FIXME: Maybe we should expand this earlier.
// Strategy: first try to legalize the predicate to a direct FLT/FLE/FEQ
// (possibly swapping operands or inverting the result); otherwise expand
// ONE/UEQ as an OR of two FLTs and ORD/UNO as an AND of two FEQs, with the
// U* variants inverted at the end via XORI.
// NOTE(review): the constrainSelectedInstRegOperands calls after each built
// compare/OR/AND/XORI appear truncated in this copy of the file — verify
// against the repository.
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI) const {
  auto &CmpMI = cast<GFCmp>(MI);
  CmpInst::Predicate Pred = CmpMI.getCond();

  Register DstReg = CmpMI.getReg(0);
  Register LHS = CmpMI.getLHSReg();
  Register RHS = CmpMI.getRHSReg();

  unsigned Size = MRI->getType(LHS).getSizeInBits();
  assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");

  // TmpReg receives the un-inverted compare result; it only differs from
  // DstReg when a final XORI inversion is needed.
  Register TmpReg = DstReg;

  bool NeedInvert = false;
  // First try swapping operands or inverting.
  if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *Cmp = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                TII.get(getFCmpOpcode(Pred, Size)), TmpReg)
                            .addReg(LHS)
                            .addReg(RHS);
  } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
    // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
    NeedInvert = Pred == CmpInst::FCMP_UEQ;
    Register Cmp1Reg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *Cmp1 =
        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                TII.get(getFCmpOpcode(CmpInst::FCMP_OLT, Size)), Cmp1Reg)
            .addReg(LHS)
            .addReg(RHS);
    Register Cmp2Reg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *Cmp2 =
        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                TII.get(getFCmpOpcode(CmpInst::FCMP_OLT, Size)), Cmp2Reg)
            .addReg(RHS)
            .addReg(LHS);
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *Or = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                               TII.get(RISCV::OR), TmpReg)
                           .addReg(Cmp1Reg)
                           .addReg(Cmp2Reg);
  } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
    // FIXME: If LHS and RHS are the same we can use a single FEQ.
    NeedInvert = Pred == CmpInst::FCMP_UNO;
    Register Cmp1Reg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *Cmp1 =
        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                TII.get(getFCmpOpcode(CmpInst::FCMP_OEQ, Size)), Cmp1Reg)
            .addReg(LHS)
            .addReg(LHS);
    Register Cmp2Reg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *Cmp2 =
        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                TII.get(getFCmpOpcode(CmpInst::FCMP_OEQ, Size)), Cmp2Reg)
            .addReg(RHS)
            .addReg(RHS);
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *And = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                TII.get(RISCV::AND), TmpReg)
                            .addReg(Cmp1Reg)
                            .addReg(Cmp2Reg);
  } else
    llvm_unreachable("Unhandled predicate");

  // Emit an XORI to invert the result if needed.
  if (NeedInvert) {
    MachineInstr *Xor = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                TII.get(RISCV::XORI), DstReg)
                            .addReg(TmpReg)
                            .addImm(1);
  }

  MI.eraseFromParent();
  return true;
}
1963
/// Emit the instruction(s) implementing a G_FENCE with the given ordering
/// and sync scope, inserting before MI (the caller erases the G_FENCE).
/// Ztso needs a real FENCE only for seq_cst System fences; single-thread
/// fences become compiler-only MEMBARRIERs; everything else maps onto
/// FENCE/FENCE.TSO with pred/succ bits per Table A.6 of the ISA manual.
/// NOTE(review): several lines appear truncated in this copy of the file —
/// the .addImm pred/succ operands on the Ztso seq_cst FENCE (leaving that
/// statement without a semicolon here), and the assignments completing
/// Pred/Succ in the Acquire, Release and SequentiallyConsistent cases.
/// Verify against the repository.
void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (STI.hasStdExtZtso()) {
    // The only fence that needs an instruction is a sequentially-consistent
    // cross-thread fence.
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // fence rw, rw
      BuildMI(MBB, MI, DL, TII.get(RISCV::FENCE))
      return;
    }

    // MEMBARRIER is a compiler barrier; it codegens to a no-op.
    BuildMI(MBB, MI, DL, TII.get(TargetOpcode::MEMBARRIER));
    return;
  }

  // singlethread fences only synchronize with signal handlers on the same
  // thread and thus only need to preserve instruction order, not actually
  // enforce memory ordering.
  if (FenceSSID == SyncScope::SingleThread) {
    BuildMI(MBB, MI, DL, TII.get(TargetOpcode::MEMBARRIER));
    return;
  }

  // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
  // Manual: Volume I.
  unsigned Pred, Succ;
  switch (FenceOrdering) {
  default:
    llvm_unreachable("Unexpected ordering");
  case AtomicOrdering::AcquireRelease:
    // fence acq_rel -> fence.tso
    BuildMI(MBB, MI, DL, TII.get(RISCV::FENCE_TSO));
    return;
  case AtomicOrdering::Acquire:
    // fence acquire -> fence r, rw
    Pred = RISCVFenceField::R;
    break;
  case AtomicOrdering::Release:
    // fence release -> fence rw, w
    Succ = RISCVFenceField::W;
    break;
  case AtomicOrdering::SequentiallyConsistent:
    // fence seq_cst -> fence rw, rw
    break;
  }
  BuildMI(MBB, MI, DL, TII.get(RISCV::FENCE)).addImm(Pred).addImm(Succ);
}
2023
/// Factory entry point: construct the RISC-V GlobalISel instruction selector.
/// NOTE(review): the line carrying the function name
/// (`createRISCVInstructionSelector(const RISCVTargetMachine &TM,`) appears
/// truncated in this copy of the file — verify against the repository.
namespace llvm {
InstructionSelector *
                                 const RISCVSubtarget &Subtarget,
                                 const RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm
unsigned const MachineRegisterInfo * MRI
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Provides analysis for querying information about KnownBits during GISel passes.
#define DEBUG_TYPE
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
static bool hasAllWUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI)
static bool hasAllNBitUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI, unsigned OrigBits)
#define I(x, y, z)
Definition MD5.cpp:57
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
#define P(N)
static StringRef getName(Value *V)
static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize)
Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the G...
static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize)
Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the G...
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS, CmpInst::Predicate &Pred, bool &NeedInvert)
static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC, Register &LHS, Register &RHS, MachineRegisterInfo &MRI)
const SmallVectorImpl< MachineOperand > & Cond
This file declares the targeting of the RegisterBankInfo class for RISC-V.
#define LLVM_DEBUG(...)
Definition Debug.h:114
Value * RHS
Value * LHS
APInt bitcastToAPInt() const
Definition APFloat.h:1408
bool isPosZero() const
Definition APFloat.h:1527
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1503
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition APInt.h:1118
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition APInt.h:476
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
Definition APInt.h:287
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition Constants.h:174
This is an important base class in LLVM.
Definition Constant.h:43
virtual void setupMF(MachineFunction &mf, GISelValueTracking *vt, CodeGenCoverage *covinfo=nullptr, ProfileSummaryInfo *psi=nullptr, BlockFrequencyInfo *bfi=nullptr)
Setup per-MF executor state.
Register getPointerReg() const
Get the source register of the pointer value.
MachineMemOperand & getMMO() const
Get the MachineMemOperand on this instruction.
LocationSize getMemSizeInBits() const
Returns the size in bits of the memory access.
Register getReg(unsigned Idx) const
Access the Idx'th operand as a register and return it.
constexpr unsigned getScalarSizeInBits() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr bool isVector() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr unsigned getAddressSpace() const
TypeSize getValue() const
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addDisp(const MachineOperand &Disp, int64_t off, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
unsigned getOperandNo(const_mop_iterator I) const
Returns the number of the operand iterator I points to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Analysis providing profile information.
This class provides the information for the target register banks.
unsigned getXLen() const
std::optional< unsigned > getRealVLen() const
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
static unsigned getRegClassIDForVecVT(MVT VT)
static RISCVVType::VLMUL getLMUL(MVT VT)
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
This class implements the register bank concept.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
bool isPositionIndependent() const
CodeModel::Model getCodeModel() const
Returns the code model.
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
operand_type_match m_Reg()
SpecificConstantMatch m_SpecificICst(const APInt &RequestedValue)
Matches a constant equal to RequestedValue.
operand_type_match m_Pred()
UnaryOp_match< SrcTy, TargetOpcode::G_ZEXT > m_GZExt(const SrcTy &Src)
ConstantMatch< APInt > m_ICst(APInt &Cst)
BinaryOp_match< LHS, RHS, TargetOpcode::G_ADD, true > m_GAdd(const LHS &L, const RHS &R)
OneNonDBGUse_match< SubPat > m_OneNonDBGUse(const SubPat &SP)
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_ICMP > m_GICmp(const Pred &P, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_SUB > m_GSub(const LHS &L, const RHS &R)
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
BinaryOp_match< LHS, RHS, TargetOpcode::G_SHL, false > m_GShl(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_AND, true > m_GAnd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_LSHR, false > m_GLShr(const LHS &L, const RHS &R)
unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
SmallVector< Inst, 8 > InstSeq
Definition RISCVMatInt.h:43
static unsigned decodeVSEW(unsigned VSEW)
LLVM_ABI unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul)
LLVM_ABI unsigned encodeVTYPE(VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic, bool AltFmt=false)
static constexpr int64_t VLMaxSentinel
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
Definition LLVMContext.h:55
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
PointerUnion< const TargetRegisterClass *, const RegisterBank * > RegClassOrRegBank
Convenient type to represent either a register class or a register bank.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
bool isStrongerThanMonotonic(AtomicOrdering AO)
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
Definition bit.h:293
LLVM_ABI void constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition Utils.cpp:155
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
Definition bit.h:303
LLVM_ABI MVT getMVTForLLT(LLT Ty)
Get a rough equivalent of an MVT for a given LLT.
InstructionSelector * createRISCVInstructionSelector(const RISCVTargetMachine &TM, const RISCVSubtarget &Subtarget, const RISCVRegisterBankInfo &RBI)
LLVM_ABI std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
Definition Utils.cpp:314
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:202
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void reportGISelFailure(MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition Utils.cpp:258
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
AtomicOrdering
Atomic ordering for LLVM's memory model.
constexpr T maskTrailingZeros(unsigned N)
Create a bitmask with the N right-most bits set to 0, and all other bits set to 1.
Definition MathExtras.h:94
@ Or
Bitwise or logical OR of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
Definition Utils.cpp:433
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
Definition MathExtras.h:77
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define MORE()
Definition regcomp.c:246
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.