//===- RISCVMatInt.cpp - Immediate materialisation -------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "RISCVMatInt.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;
static int getInstSeqCost(RISCVMatInt::InstSeq &Res, bool HasRVC) {
  if (!HasRVC)
    return Res.size();

  int Cost = 0;
  for (auto Instr : Res) {
    // Assume instructions that aren't listed aren't compressible.
    bool Compressed = false;
    switch (Instr.Opc) {
    case RISCV::SLLI:
    case RISCV::SRLI:
      Compressed = true;
      break;
    case RISCV::ADDI:
    case RISCV::ADDIW:
    case RISCV::LUI:
      Compressed = isInt<6>(Instr.Imm);
      break;
    }
    // Two RVC instructions take the same space as one RVI instruction, but
    // can take longer to execute than the single RVI instruction. Thus, we
    // consider that two RVC instructions are slightly more costly than one
    // RVI instruction. For longer sequences of RVC instructions the space
    // savings can be worth it, though. The costs below try to model that.
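    // (Illustrative arithmetic, not from the original source: two compressed
    // instructions cost 2 * 70 = 140 versus 100 for a single RVI instruction,
    // while five compressed instructions cost 350 versus 500 uncompressed.)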
    if (!Compressed)
      Cost += 100; // Baseline cost of one RVI instruction: 100%.
    else
      Cost += 70; // 70% cost of baseline.
  }
  return Cost;
}
// Recursively generate a sequence for materializing an integer.
static void generateInstSeqImpl(int64_t Val,
                                const FeatureBitset &ActiveFeatures,
                                RISCVMatInt::InstSeq &Res) {
  bool IsRV64 = ActiveFeatures[RISCV::Feature64Bit];
  if (isInt<32>(Val)) {
    // Depending on the active bits in the immediate Value v, the following
    // instruction sequences are emitted:
    //
    // v == 0                        : ADDI
    // v[0,12) != 0 && v[12,32) == 0 : ADDI
    // v[0,12) == 0 && v[12,32) != 0 : LUI
    // v[0,32) != 0                  : LUI+ADDI(W)
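    //
    // (Worked example, not from the original source: for v = 0x12345FFF the
    // rounding below yields Hi20 = 0x12346 and Lo12 = -1, so LUI 0x12346
    // followed by ADDI(W) -1 produces 0x12346000 - 1 = 0x12345FFF.)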
    int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
    int64_t Lo12 = SignExtend64<12>(Val);

    if (Hi20)
      Res.push_back(RISCVMatInt::Inst(RISCV::LUI, Hi20));

    if (Lo12 || Hi20 == 0) {
      unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
      Res.push_back(RISCVMatInt::Inst(AddiOpc, Lo12));
    }
    return;
  }
  assert(IsRV64 && "Can't emit >32-bit imm for non-RV64 target");
  // Use BSETI for a single bit.
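  // (For instance, Val = 1ULL << 45 becomes a single BSETI 45.)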
  if (ActiveFeatures[RISCV::FeatureStdExtZbs] && isPowerOf2_64(Val)) {
    Res.push_back(RISCVMatInt::Inst(RISCV::BSETI, Log2_64(Val)));
    return;
  }
  // In the worst case, for a full 64-bit constant, a sequence of 8 instructions
  // (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be emitted. Note
  // that the first two instructions (LUI+ADDIW) can contribute up to 32 bits
  // while the following ADDI instructions contribute up to 12 bits each.
  //
  // At first glance, implementing this seems to be possible by simply
  // emitting the most significant 32 bits (LUI+ADDIW) followed by as many left
  // shift (SLLI) and immediate additions (ADDI) as needed. However, due to the
  // fact that ADDI performs a sign extended addition, doing it like that would
  // only be possible when at most 11 bits of the ADDI instructions are used.
  // Using all 12 bits of the ADDI instructions, as done by GAS, actually
  // requires that the constant is processed starting with the least significant
  // bit.
  //
  // In the following, constants are processed from LSB to MSB but instruction
  // emission is performed from MSB to LSB by recursively calling
  // generateInstSeq. In each recursion, first the lowest 12 bits are removed
  // from the constant and the optimal shift amount, which can be greater than
  // 12 bits if the constant is sparse, is determined. Then, the shifted
  // remaining constant is processed recursively and gets emitted as soon as it
  // fits into 32 bits. The emission of the shifts and additions is subsequently
  // performed when the recursion returns.
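  //
  // (Worked example, not from the original source: Val = 0x80000FFF. Removing
  // the low 12 bits gives Lo12 = -1 and leaves 0x80001000; its twelve trailing
  // zeros are shifted out and the remaining 0x80001 fits into 32 bits, so
  // unwinding the recursion emits LUI 0x80, ADDIW 1, SLLI 12, ADDI -1.)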
  int64_t Lo12 = SignExtend64<12>(Val);
  Val = (uint64_t)Val - (uint64_t)Lo12;

  int ShiftAmount = 0;
  bool Unsigned = false;

  // Val might now be valid for LUI without needing a shift.
  if (!isInt<32>(Val)) {
    ShiftAmount = findFirstSet((uint64_t)Val);
    Val >>= ShiftAmount;
    // If the remaining bits don't fit in 12 bits, we might be able to reduce
    // the shift amount in order to use LUI which will zero the lower 12 bits.
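    // (Worked example, not from the original source: Val = 0x12345000000 is
    // shifted down by 24 to 0x12345; reducing the shift to 12 turns it into
    // 0x12345000, so LUI 0x12345 plus SLLI 12 suffices instead of
    // LUI + ADDIW + SLLI.)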
    if (ShiftAmount > 12 && !isInt<12>(Val)) {
      if (isInt<32>((uint64_t)Val << 12)) {
        // Reduce the shift amount and add zeros to the LSBs so it will match
        // LUI.
        ShiftAmount -= 12;
        Val = (uint64_t)Val << 12;
      } else if (isUInt<32>((uint64_t)Val << 12) &&
                 ActiveFeatures[RISCV::FeatureStdExtZba]) {
        // Reduce the shift amount and add zeros to the LSBs so it will match
        // LUI, then shift left with SLLI.UW to clear the upper 32 set bits.
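        // (Worked example, not from the original source: Val = 0x80001000000
        // is shifted down by 24 to 0x80001, and 0x80001 << 12 = 0x80001000
        // only fits as an unsigned 32-bit value. With the upper bits forced
        // to ones, LUI 0x80001 materialises 0xFFFFFFFF80001000 and SLLI_UW 12
        // clears the upper half while shifting, yielding 0x80001000000.)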
        ShiftAmount -= 12;
        Val = ((uint64_t)Val << 12) | (0xffffffffull << 32);
        Unsigned = true;
      }
    }
    // Try to use SLLI_UW for Val when it is uint32 but not int32.
    if (isUInt<32>((uint64_t)Val) && !isInt<32>((uint64_t)Val) &&
        ActiveFeatures[RISCV::FeatureStdExtZba]) {
      // Use LUI+ADDI or LUI to compose, then clear the upper 32 bits with
      // SLLI_UW.
      Val = ((uint64_t)Val) | (0xffffffffull << 32);
      Unsigned = true;
    }
  }
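  // (Worked example, not from the original source: Val = 0xF0F0F0F100000000
  // is shifted down by 32 to 0xF0F0F0F1, which is uint32 but not int32. With
  // the upper bits forced to ones, LUI 0xF0F0F + ADDIW 0xF1 materialise
  // 0xFFFFFFFFF0F0F0F1 and SLLI_UW 32 restores the original constant.)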
  generateInstSeqImpl(Val, ActiveFeatures, Res);

  // Skip shift if we were able to use LUI directly.
  if (ShiftAmount) {
    if (Unsigned)
      Res.push_back(RISCVMatInt::Inst(RISCV::SLLI_UW, ShiftAmount));
    else
      Res.push_back(RISCVMatInt::Inst(RISCV::SLLI, ShiftAmount));
  }

  if (Lo12)
    Res.push_back(RISCVMatInt::Inst(RISCV::ADDI, Lo12));
}
static unsigned extractRotateInfo(int64_t Val) {
  // for case: 0b111..1..xxxxxx1..1..
  unsigned LeadingOnes = countLeadingOnes((uint64_t)Val);
  unsigned TrailingOnes = countTrailingOnes((uint64_t)Val);
  if (TrailingOnes > 0 && TrailingOnes < 64 &&
      (LeadingOnes + TrailingOnes) > (64 - 12))
    return 64 - TrailingOnes;
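  // (Worked example, not from the original source: Val = 0xFFFFF001FFFFFFFF
  // has 20 leading and 33 trailing ones, 53 in total, so the rotate amount
  // is 64 - 33 = 31.)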
  // for case: 0bxxx1..1..1...xxx
  unsigned UpperTrailingOnes = countTrailingOnes(Hi_32(Val));
  unsigned LowerLeadingOnes = countLeadingOnes(Lo_32(Val));
  if (UpperTrailingOnes < 32 &&
      (UpperTrailingOnes + LowerLeadingOnes) > (64 - 12))
    return 32 - UpperTrailingOnes;

  return 0;
}
namespace llvm {
namespace RISCVMatInt {
InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
  RISCVMatInt::InstSeq Res;
  generateInstSeqImpl(Val, ActiveFeatures, Res);
  // If there are trailing zeros, try generating a sign extended constant with
  // no trailing zeros and use a final SLLI to restore them.
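  // (Illustration, not from the original source: Val = 0x7FFFFFFF00000000
  // shifts down to 0x7FFFFFFF, which is just LUI 0x80000 + ADDIW -1, and
  // SLLI 32 restores the trailing zeros; the new sequence is kept below only
  // if it is actually shorter.)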
  if ((Val & 1) == 0 && Res.size() > 2) {
    unsigned TrailingZeros = countTrailingZeros((uint64_t)Val);
    int64_t ShiftedVal = Val >> TrailingZeros;
    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(ShiftedVal, ActiveFeatures, TmpSeq);
    TmpSeq.push_back(RISCVMatInt::Inst(RISCV::SLLI, TrailingZeros));
    // Keep the new sequence if it is an improvement.
    if (TmpSeq.size() < Res.size()) {
      Res = TmpSeq;
      // A 2 instruction sequence is the best we can do.
      if (Res.size() <= 2)
        return Res;
    }
  }
  // If the constant is positive we might be able to generate a shifted constant
  // with no leading zeros and use a final SRLI to restore them.
  if (Val > 0 && Res.size() > 2) {
    assert(ActiveFeatures[RISCV::Feature64Bit] &&
           "Expected RV32 to only need 2 instructions");
    unsigned LeadingZeros = countLeadingZeros((uint64_t)Val);
    uint64_t ShiftedVal = (uint64_t)Val << LeadingZeros;
    // Fill in the bits that will be shifted out with 1s. An example where this
    // helps is trailing one masks with 32 or more ones. This will generate
    // ADDI -1 and an SRLI.
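    // (Worked example, not from the original source: Val = (1ULL << 34) - 1
    // has 30 leading zeros; shifting left and filling with ones gives the
    // all-ones value, i.e. ADDI -1, and SRLI 30 recovers the 34-bit mask in
    // two instructions instead of three.)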
    ShiftedVal |= maskTrailingOnes<uint64_t>(LeadingZeros);

    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(ShiftedVal, ActiveFeatures, TmpSeq);
    TmpSeq.push_back(RISCVMatInt::Inst(RISCV::SRLI, LeadingZeros));
    // Keep the new sequence if it is an improvement.
    if (TmpSeq.size() < Res.size()) {
      Res = TmpSeq;
      // A 2 instruction sequence is the best we can do.
      if (Res.size() <= 2)
        return Res;
    }
    // Some cases can benefit from filling the lower bits with zeros instead.
    ShiftedVal &= maskTrailingZeros<uint64_t>(LeadingZeros);
    TmpSeq.clear();
    generateInstSeqImpl(ShiftedVal, ActiveFeatures, TmpSeq);
    TmpSeq.push_back(RISCVMatInt::Inst(RISCV::SRLI, LeadingZeros));
    // Keep the new sequence if it is an improvement.
    if (TmpSeq.size() < Res.size()) {
      Res = TmpSeq;
      // A 2 instruction sequence is the best we can do.
      if (Res.size() <= 2)
        return Res;
    }
    // If we have exactly 32 leading zeros and Zba, we can try using zext.w at
    // the end of the sequence.
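    // (Worked example, not from the original source: Val = 0xFF0F0F0F. Its
    // leading-ones variant 0xFFFFFFFFFF0F0F0F is just LUI 0xFF0F1 +
    // ADDIW -241, and ADD_UW with x0 zeroes the upper 32 bits again, one
    // instruction shorter than the default LUI+ADDIW+SLLI+ADDI sequence.)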
    if (LeadingZeros == 32 && ActiveFeatures[RISCV::FeatureStdExtZba]) {
      // Try replacing upper bits with 1.
      uint64_t LeadingOnesVal = Val | maskLeadingOnes<uint64_t>(LeadingZeros);
      TmpSeq.clear();
      generateInstSeqImpl(LeadingOnesVal, ActiveFeatures, TmpSeq);
      TmpSeq.push_back(RISCVMatInt::Inst(RISCV::ADD_UW, 0));

      // Keep the new sequence if it is an improvement.
      if (TmpSeq.size() < Res.size()) {
        Res = TmpSeq;
        // A 2 instruction sequence is the best we can do.
        if (Res.size() <= 2)
          return Res;
      }
    }
  }
  // Perform optimization with BCLRI/BSETI in the Zbs extension.
  if (Res.size() > 2 && ActiveFeatures[RISCV::FeatureStdExtZbs]) {
    assert(ActiveFeatures[RISCV::Feature64Bit] &&
           "Expected RV32 to only need 2 instructions");
    // 1. For values in range 0xffffffff 7fffffff ~ 0xffffffff 00000000,
    //    call generateInstSeqImpl with Val|0x80000000 (which is expected to be
    //    an int32), then emit (BCLRI r, 31).
    // 2. For values in range 0x80000000 ~ 0xffffffff, call generateInstSeqImpl
    //    with Val&~0x80000000 (which is expected to be an int32), then
    //    emit (BSETI r, 31).
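    // (Worked example, not from the original source: Val = 0xFFFFFFFF00001235.
    // Val|0x80000000 materialises as LUI 0x80001 + ADDIW 0x235, and BCLRI 31
    // clears bit 31 again: three instructions instead of four.)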
    int64_t NewVal;
    unsigned Opc;
    if (Val < 0) {
      Opc = RISCV::BCLRI;
      NewVal = Val | 0x80000000ll;
    } else {
      Opc = RISCV::BSETI;
      NewVal = Val & ~0x80000000ll;
    }
    if (isInt<32>(NewVal)) {
      RISCVMatInt::InstSeq TmpSeq;
      generateInstSeqImpl(NewVal, ActiveFeatures, TmpSeq);
      TmpSeq.push_back(RISCVMatInt::Inst(Opc, 31));
      if (TmpSeq.size() < Res.size())
        Res = TmpSeq;
    }
    // Try to use BCLRI for upper 32 bits if the original lower 32 bits are
    // negative int32, or use BSETI for upper 32 bits if the original lower
    // 32 bits are positive int32.
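    // (Worked example, not from the original source: Val = 0x1000000001. The
    // lower half is a single ADDI 1, and the one set upper bit becomes
    // BSETI 36, two instructions in total.)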
    int32_t Lo = Lo_32(Val);
    uint32_t Hi = Val >> 32;
    Opc = 0;
    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(Lo, ActiveFeatures, TmpSeq);
    // Check if it is profitable to use BCLRI/BSETI.
    if (Lo > 0 && TmpSeq.size() + countPopulation(Hi) < Res.size()) {
      Opc = RISCV::BSETI;
    } else if (Lo < 0 && TmpSeq.size() + countPopulation(~Hi) < Res.size()) {
      Opc = RISCV::BCLRI;
      Hi = ~Hi;
    }
    // Search for each bit and build corresponding BCLRI/BSETI.
    if (Opc > 0) {
      while (Hi != 0) {
        unsigned Bit = countTrailingZeros(Hi);
        TmpSeq.push_back(RISCVMatInt::Inst(Opc, Bit + 32));
        Hi &= ~(uint32_t(1) << Bit);
      }
      if (TmpSeq.size() < Res.size())
        Res = TmpSeq;
    }
  }
  // Perform optimization with SH*ADD in the Zba extension.
  if (Res.size() > 2 && ActiveFeatures[RISCV::FeatureStdExtZba]) {
    assert(ActiveFeatures[RISCV::Feature64Bit] &&
           "Expected RV32 to only need 2 instructions");
    int64_t Div = 0;
    unsigned Opc = 0;
    RISCVMatInt::InstSeq TmpSeq;
    // Select the opcode and divisor.
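    // (Worked example, not from the original source, assuming only Zba is
    // enabled: Val = 0x17FFFD003 is 3 * 0x7FFFF001, so LUI 0x7FFFF + ADDIW 1
    // + SH1ADD beats the four-instruction LUI+ADDIW+SLLI+ADDI default.)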
    if ((Val % 3) == 0 && isInt<32>(Val / 3)) {
      Div = 3;
      Opc = RISCV::SH1ADD;
    } else if ((Val % 5) == 0 && isInt<32>(Val / 5)) {
      Div = 5;
      Opc = RISCV::SH2ADD;
    } else if ((Val % 9) == 0 && isInt<32>(Val / 9)) {
      Div = 9;
      Opc = RISCV::SH3ADD;
    }
    // Build the new instruction sequence.
    if (Div > 0) {
      generateInstSeqImpl(Val / Div, ActiveFeatures, TmpSeq);
      TmpSeq.push_back(RISCVMatInt::Inst(Opc, 0));
      if (TmpSeq.size() < Res.size())
        Res = TmpSeq;
    } else {
      // Try to use LUI+SH*ADD+ADDI.
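      // (Worked example, not from the original source, assuming only Zba is
      // enabled: Val = 0x17FFFD005 rounds to Hi52 = 0x17FFFD000, which is
      // 3 * 0x7FFFF000, with Lo12 = 5, giving LUI 0x7FFFF + SH1ADD + ADDI 5.)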
      int64_t Hi52 = ((uint64_t)Val + 0x800ull) & ~0xfffull;
      int64_t Lo12 = SignExtend64<12>(Val);

      if (isInt<32>(Hi52 / 3) && (Hi52 % 3) == 0) {
        Div = 3;
        Opc = RISCV::SH1ADD;
      } else if (isInt<32>(Hi52 / 5) && (Hi52 % 5) == 0) {
        Div = 5;
        Opc = RISCV::SH2ADD;
      } else if (isInt<32>(Hi52 / 9) && (Hi52 % 9) == 0) {
        Div = 9;
        Opc = RISCV::SH3ADD;
      }
      // Build the new instruction sequence.
      if (Div > 0) {
        // A Val with zero Lo12 (i.e. Val equal to Hi52) should already have
        // been processed into LUI+SH*ADD by the previous optimization.
        assert(Lo12 != 0 &&
               "unexpected instruction sequence for immediate materialisation");
        assert(TmpSeq.empty() && "Expected empty TmpSeq");
        generateInstSeqImpl(Hi52 / Div, ActiveFeatures, TmpSeq);
        TmpSeq.push_back(RISCVMatInt::Inst(Opc, 0));
        TmpSeq.push_back(RISCVMatInt::Inst(RISCV::ADDI, Lo12));
        if (TmpSeq.size() < Res.size())
          Res = TmpSeq;
      }
    }
  }
  // Perform optimization with rori in the Zbb extension.
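  // (Worked example, not from the original source: Val = 0xFFFFF001FFFFFFFF
  // rotated left by 31 is -2048, so ADDI -2048 followed by RORI 31
  // rematerialises the constant in two instructions.)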
  if (Res.size() > 2 && ActiveFeatures[RISCV::FeatureStdExtZbb]) {
    if (unsigned Rotate = extractRotateInfo(Val)) {
      RISCVMatInt::InstSeq TmpSeq;
      uint64_t NegImm12 =
          ((uint64_t)Val >> (64 - Rotate)) | ((uint64_t)Val << Rotate);
      assert(isInt<12>(NegImm12));
      TmpSeq.push_back(RISCVMatInt::Inst(RISCV::ADDI, NegImm12));
      TmpSeq.push_back(RISCVMatInt::Inst(RISCV::RORI, Rotate));
      Res = TmpSeq;
    }
  }
  return Res;
}
int getIntMatCost(const APInt &Val, unsigned Size,
                  const FeatureBitset &ActiveFeatures, bool CompressionCost) {
  bool IsRV64 = ActiveFeatures[RISCV::Feature64Bit];
  bool HasRVC = CompressionCost && ActiveFeatures[RISCV::FeatureStdExtC];
  int PlatRegSize = IsRV64 ? 64 : 32;
  // Split the constant into platform register sized chunks, and calculate cost
  // of each chunk.
  int Cost = 0;
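  // (For instance, a 128-bit constant is costed as two 64-bit chunks on RV64
  // or four 32-bit chunks on RV32.)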
  for (unsigned ShiftVal = 0; ShiftVal < Size; ShiftVal += PlatRegSize) {
    APInt Chunk = Val.ashr(ShiftVal).sextOrTrunc(PlatRegSize);
    InstSeq MatSeq = generateInstSeq(Chunk.getSExtValue(), ActiveFeatures);
    Cost += getInstSeqCost(MatSeq, HasRVC);
  }
  return std::max(1, Cost);
}
OpndKind Inst::getOpndKind() const {
  switch (Opc) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case RISCV::LUI:
    return RISCVMatInt::Imm;
  case RISCV::ADD_UW:
    return RISCVMatInt::RegX0;
  case RISCV::SH1ADD:
  case RISCV::SH2ADD:
  case RISCV::SH3ADD:
    return RISCVMatInt::RegReg;
  case RISCV::ADDI:
  case RISCV::ADDIW:
  case RISCV::SLLI:
  case RISCV::SRLI:
  case RISCV::SLLI_UW:
  case RISCV::RORI:
  case RISCV::BSETI:
  case RISCV::BCLRI:
    return RISCVMatInt::RegImm;
  }
}

} // namespace RISCVMatInt
} // namespace llvm