//
// The LLVM Compiler Infrastructure
//
-// This file was developed by a team from the Computer Systems Research
-// Department at The Aerospace Corporation and is distributed under the
-// University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Cell SPU Instructions:
// finally the X-form with the register-register.
//===----------------------------------------------------------------------===//
-let isLoad = 1 in {
- def LQDv16i8:
- RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
- "lqd\t$rT, $src", LoadStore,
- [(set (v16i8 VECREG:$rT), (load dform_addr:$src))]>;
-
- def LQDv8i16:
- RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
- "lqd\t$rT, $src", LoadStore,
- [(set (v8i16 VECREG:$rT), (load dform_addr:$src))]>;
-
- def LQDv4i32:
- RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
- "lqd\t$rT, $src", LoadStore,
- [(set (v4i32 VECREG:$rT), (load dform_addr:$src))]>;
-
- def LQDv2i64:
- RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
- "lqd\t$rT, $src", LoadStore,
- [(set (v2i64 VECREG:$rT), (load dform_addr:$src))]>;
-
- def LQDv4f32:
- RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
- "lqd\t$rT, $src", LoadStore,
- [(set (v4f32 VECREG:$rT), (load dform_addr:$src))]>;
-
- def LQDv2f64:
- RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
- "lqd\t$rT, $src", LoadStore,
- [(set (v2f64 VECREG:$rT), (load dform_addr:$src))]>;
-
- def LQDr128:
- RI10Form<0b00101100, (outs GPRC:$rT), (ins memri10:$src),
- "lqd\t$rT, $src", LoadStore,
- [(set GPRC:$rT, (load dform_addr:$src))]>;
-
- def LQDr64:
- RI10Form<0b00101100, (outs R64C:$rT), (ins memri10:$src),
- "lqd\t$rT, $src", LoadStore,
- [(set R64C:$rT, (load dform_addr:$src))]>;
-
- def LQDr32:
- RI10Form<0b00101100, (outs R32C:$rT), (ins memri10:$src),
- "lqd\t$rT, $src", LoadStore,
- [(set R32C:$rT, (load dform_addr:$src))]>;
-
- // Floating Point
- def LQDf32:
- RI10Form<0b00101100, (outs R32FP:$rT), (ins memri10:$src),
- "lqd\t$rT, $src", LoadStore,
- [(set R32FP:$rT, (load dform_addr:$src))]>;
-
- def LQDf64:
- RI10Form<0b00101100, (outs R64FP:$rT), (ins memri10:$src),
- "lqd\t$rT, $src", LoadStore,
- [(set R64FP:$rT, (load dform_addr:$src))]>;
- // END Floating Point
-
- def LQDr16:
- RI10Form<0b00101100, (outs R16C:$rT), (ins memri10:$src),
- "lqd\t$rT, $src", LoadStore,
- [(set R16C:$rT, (load dform_addr:$src))]>;
-
- def LQAv16i8:
- RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
- "lqa\t$rT, $src", LoadStore,
- [(set (v16i8 VECREG:$rT), (load aform_addr:$src))]>;
-
- def LQAv8i16:
- RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
- "lqa\t$rT, $src", LoadStore,
- [(set (v8i16 VECREG:$rT), (load aform_addr:$src))]>;
-
- def LQAv4i32:
- RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
- "lqa\t$rT, $src", LoadStore,
- [(set (v4i32 VECREG:$rT), (load aform_addr:$src))]>;
-
- def LQAv2i64:
- RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
- "lqa\t$rT, $src", LoadStore,
- [(set (v2i64 VECREG:$rT), (load aform_addr:$src))]>;
-
- def LQAv4f32:
- RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
- "lqa\t$rT, $src", LoadStore,
- [(set (v4f32 VECREG:$rT), (load aform_addr:$src))]>;
-
- def LQAv2f64:
- RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
- "lqa\t$rT, $src", LoadStore,
- [(set (v2f64 VECREG:$rT), (load aform_addr:$src))]>;
-
- def LQAr128:
- RI16Form<0b100001100, (outs GPRC:$rT), (ins addr256k:$src),
- "lqa\t$rT, $src", LoadStore,
- [(set GPRC:$rT, (load aform_addr:$src))]>;
-
- def LQAr64:
- RI16Form<0b100001100, (outs R64C:$rT), (ins addr256k:$src),
- "lqa\t$rT, $src", LoadStore,
- [(set R64C:$rT, (load aform_addr:$src))]>;
-
- def LQAr32:
- RI16Form<0b100001100, (outs R32C:$rT), (ins addr256k:$src),
- "lqa\t$rT, $src", LoadStore,
- [(set R32C:$rT, (load aform_addr:$src))]>;
-
- def LQAf32:
- RI16Form<0b100001100, (outs R32FP:$rT), (ins addr256k:$src),
- "lqa\t$rT, $src", LoadStore,
- [(set R32FP:$rT, (load aform_addr:$src))]>;
-
- def LQAf64:
- RI16Form<0b100001100, (outs R64FP:$rT), (ins addr256k:$src),
- "lqa\t$rT, $src", LoadStore,
- [(set R64FP:$rT, (load aform_addr:$src))]>;
-
- def LQAr16:
- RI16Form<0b100001100, (outs R16C:$rT), (ins addr256k:$src),
- "lqa\t$rT, $src", LoadStore,
- [(set R16C:$rT, (load aform_addr:$src))]>;
-
- def LQXv16i8:
- RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
- "lqx\t$rT, $src", LoadStore,
- [(set (v16i8 VECREG:$rT), (load xform_addr:$src))]>;
-
- def LQXv8i16:
- RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
- "lqx\t$rT, $src", LoadStore,
- [(set (v8i16 VECREG:$rT), (load xform_addr:$src))]>;
-
- def LQXv4i32:
- RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
- "lqx\t$rT, $src", LoadStore,
- [(set (v4i32 VECREG:$rT), (load xform_addr:$src))]>;
-
- def LQXv2i64:
- RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
- "lqx\t$rT, $src", LoadStore,
- [(set (v2i64 VECREG:$rT), (load xform_addr:$src))]>;
-
- def LQXv4f32:
- RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
- "lqx\t$rT, $src", LoadStore,
- [(set (v4f32 VECREG:$rT), (load xform_addr:$src))]>;
-
- def LQXv2f64:
- RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
- "lqx\t$rT, $src", LoadStore,
- [(set (v2f64 VECREG:$rT), (load xform_addr:$src))]>;
-
- def LQXr128:
- RRForm<0b00100011100, (outs GPRC:$rT), (ins memrr:$src),
- "lqx\t$rT, $src", LoadStore,
- [(set GPRC:$rT, (load xform_addr:$src))]>;
-
- def LQXr64:
- RRForm<0b00100011100, (outs R64C:$rT), (ins memrr:$src),
- "lqx\t$rT, $src", LoadStore,
- [(set R64C:$rT, (load xform_addr:$src))]>;
-
- def LQXr32:
- RRForm<0b00100011100, (outs R32C:$rT), (ins memrr:$src),
- "lqx\t$rT, $src", LoadStore,
- [(set R32C:$rT, (load xform_addr:$src))]>;
-
- def LQXf32:
- RRForm<0b00100011100, (outs R32FP:$rT), (ins memrr:$src),
- "lqx\t$rT, $src", LoadStore,
- [(set R32FP:$rT, (load xform_addr:$src))]>;
-
- def LQXf64:
- RRForm<0b00100011100, (outs R64FP:$rT), (ins memrr:$src),
- "lqx\t$rT, $src", LoadStore,
- [(set R64FP:$rT, (load xform_addr:$src))]>;
-
- def LQXr16:
- RRForm<0b00100011100, (outs R16C:$rT), (ins memrr:$src),
- "lqx\t$rT, $src", LoadStore,
- [(set R16C:$rT, (load xform_addr:$src))]>;
+let isSimpleLoad = 1 in {
+ class LoadDFormVec<ValueType vectype>
+ : RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
+ "lqd\t$rT, $src",
+ LoadStore,
+ [(set (vectype VECREG:$rT), (load dform_addr:$src))]>
+ { }
+
+ class LoadDForm<RegisterClass rclass>
+ : RI10Form<0b00101100, (outs rclass:$rT), (ins memri10:$src),
+ "lqd\t$rT, $src",
+ LoadStore,
+ [(set rclass:$rT, (load dform_addr:$src))]>
+ { }
+
+ multiclass LoadDForms
+ {
+ def v16i8: LoadDFormVec<v16i8>;
+ def v8i16: LoadDFormVec<v8i16>;
+ def v4i32: LoadDFormVec<v4i32>;
+ def v2i64: LoadDFormVec<v2i64>;
+ def v4f32: LoadDFormVec<v4f32>;
+ def v2f64: LoadDFormVec<v2f64>;
+
+ def r128: LoadDForm<GPRC>;
+ def r64: LoadDForm<R64C>;
+ def r32: LoadDForm<R32C>;
+ def f32: LoadDForm<R32FP>;
+ def f64: LoadDForm<R64FP>;
+ def r16: LoadDForm<R16C>;
+ def r8: LoadDForm<R8C>;
+ }
+
+ class LoadAFormVec<ValueType vectype>
+ : RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
+ "lqa\t$rT, $src",
+ LoadStore,
+ [(set (vectype VECREG:$rT), (load aform_addr:$src))]>
+ { }
+
+ class LoadAForm<RegisterClass rclass>
+ : RI16Form<0b100001100, (outs rclass:$rT), (ins addr256k:$src),
+ "lqa\t$rT, $src",
+ LoadStore,
+ [(set rclass:$rT, (load aform_addr:$src))]>
+ { }
+
+ multiclass LoadAForms
+ {
+ def v16i8: LoadAFormVec<v16i8>;
+ def v8i16: LoadAFormVec<v8i16>;
+ def v4i32: LoadAFormVec<v4i32>;
+ def v2i64: LoadAFormVec<v2i64>;
+ def v4f32: LoadAFormVec<v4f32>;
+ def v2f64: LoadAFormVec<v2f64>;
+
+ def r128: LoadAForm<GPRC>;
+ def r64: LoadAForm<R64C>;
+ def r32: LoadAForm<R32C>;
+ def f32: LoadAForm<R32FP>;
+ def f64: LoadAForm<R64FP>;
+ def r16: LoadAForm<R16C>;
+ def r8: LoadAForm<R8C>;
+ }
+
+ class LoadXFormVec<ValueType vectype>
+ : RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
+ "lqx\t$rT, $src",
+ LoadStore,
+ [(set (vectype VECREG:$rT), (load xform_addr:$src))]>
+ { }
+
+ class LoadXForm<RegisterClass rclass>
+ : RRForm<0b00100011100, (outs rclass:$rT), (ins memrr:$src),
+ "lqx\t$rT, $src",
+ LoadStore,
+ [(set rclass:$rT, (load xform_addr:$src))]>
+ { }
+
+ multiclass LoadXForms
+ {
+ def v16i8: LoadXFormVec<v16i8>;
+ def v8i16: LoadXFormVec<v8i16>;
+ def v4i32: LoadXFormVec<v4i32>;
+ def v2i64: LoadXFormVec<v2i64>;
+ def v4f32: LoadXFormVec<v4f32>;
+ def v2f64: LoadXFormVec<v2f64>;
+
+ def r128: LoadXForm<GPRC>;
+ def r64: LoadXForm<R64C>;
+ def r32: LoadXForm<R32C>;
+ def f32: LoadXForm<R32FP>;
+ def f64: LoadXForm<R64FP>;
+ def r16: LoadXForm<R16C>;
+ def r8: LoadXForm<R8C>;
+ }
+
+ defm LQA : LoadAForms;
+ defm LQD : LoadDForms;
+ defm LQX : LoadXForms;
/* Load quadword, PC relative: Not much use at this point in time.
- Might be of use later for relocatable code.
+ Might be of use later for relocatable code. It's effectively the
+ same as LQA, but uses PC-relative addressing.
def LQR : RI16Form<0b111001100, (outs VECREG:$rT), (ins s16imm:$disp),
"lqr\t$rT, $disp", LoadStore,
[(set VECREG:$rT, (load iaddr:$disp))]>;
*/
-
- // Catch-all for unaligned loads:
}
//===----------------------------------------------------------------------===//
// Stores:
//===----------------------------------------------------------------------===//
+class StoreDFormVec<ValueType vectype>
+ : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
+ "stqd\t$rT, $src",
+ LoadStore,
+ [(store (vectype VECREG:$rT), dform_addr:$src)]>
+{ }
+
+class StoreDForm<RegisterClass rclass>
+ : RI10Form<0b00100100, (outs), (ins rclass:$rT, memri10:$src),
+ "stqd\t$rT, $src",
+ LoadStore,
+ [(store rclass:$rT, dform_addr:$src)]>
+{ }
+
+multiclass StoreDForms
+{
+ def v16i8: StoreDFormVec<v16i8>;
+ def v8i16: StoreDFormVec<v8i16>;
+ def v4i32: StoreDFormVec<v4i32>;
+ def v2i64: StoreDFormVec<v2i64>;
+ def v4f32: StoreDFormVec<v4f32>;
+ def v2f64: StoreDFormVec<v2f64>;
+
+ def r128: StoreDForm<GPRC>;
+ def r64: StoreDForm<R64C>;
+ def r32: StoreDForm<R32C>;
+ def f32: StoreDForm<R32FP>;
+ def f64: StoreDForm<R64FP>;
+ def r16: StoreDForm<R16C>;
+ def r8: StoreDForm<R8C>;
+}
-let isStore = 1 in {
- def STQDv16i8 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
- "stqd\t$rT, $src", LoadStore,
- [(store (v16i8 VECREG:$rT), dform_addr:$src)]>;
-
- def STQDv8i16 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
- "stqd\t$rT, $src", LoadStore,
- [(store (v8i16 VECREG:$rT), dform_addr:$src)]>;
-
- def STQDv4i32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
- "stqd\t$rT, $src", LoadStore,
- [(store (v4i32 VECREG:$rT), dform_addr:$src)]>;
-
- def STQDv2i64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
- "stqd\t$rT, $src", LoadStore,
- [(store (v2i64 VECREG:$rT), dform_addr:$src)]>;
-
- def STQDv4f32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
- "stqd\t$rT, $src", LoadStore,
- [(store (v4f32 VECREG:$rT), dform_addr:$src)]>;
-
- def STQDv2f64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
- "stqd\t$rT, $src", LoadStore,
- [(store (v2f64 VECREG:$rT), dform_addr:$src)]>;
-
- def STQDr128 : RI10Form<0b00100100, (outs), (ins GPRC:$rT, memri10:$src),
- "stqd\t$rT, $src", LoadStore,
- [(store GPRC:$rT, dform_addr:$src)]>;
-
- def STQDr64 : RI10Form<0b00100100, (outs), (ins R64C:$rT, memri10:$src),
- "stqd\t$rT, $src", LoadStore,
- [(store R64C:$rT, dform_addr:$src)]>;
-
- def STQDr32 : RI10Form<0b00100100, (outs), (ins R32C:$rT, memri10:$src),
- "stqd\t$rT, $src", LoadStore,
- [(store R32C:$rT, dform_addr:$src)]>;
-
- // Floating Point
- def STQDf32 : RI10Form<0b00100100, (outs), (ins R32FP:$rT, memri10:$src),
- "stqd\t$rT, $src", LoadStore,
- [(store R32FP:$rT, dform_addr:$src)]>;
-
- def STQDf64 : RI10Form<0b00100100, (outs), (ins R64FP:$rT, memri10:$src),
- "stqd\t$rT, $src", LoadStore,
- [(store R64FP:$rT, dform_addr:$src)]>;
-
- def STQDr16 : RI10Form<0b00100100, (outs), (ins R16C:$rT, memri10:$src),
- "stqd\t$rT, $src", LoadStore,
- [(store R16C:$rT, dform_addr:$src)]>;
-
- def STQAv16i8 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
- "stqa\t$rT, $src", LoadStore,
- [(store (v16i8 VECREG:$rT), aform_addr:$src)]>;
-
- def STQAv8i16 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
- "stqa\t$rT, $src", LoadStore,
- [(store (v8i16 VECREG:$rT), aform_addr:$src)]>;
-
- def STQAv4i32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
- "stqa\t$rT, $src", LoadStore,
- [(store (v4i32 VECREG:$rT), aform_addr:$src)]>;
-
- def STQAv2i64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
- "stqa\t$rT, $src", LoadStore,
- [(store (v2i64 VECREG:$rT), aform_addr:$src)]>;
-
- def STQAv4f32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
- "stqa\t$rT, $src", LoadStore,
- [(store (v4f32 VECREG:$rT), aform_addr:$src)]>;
-
- def STQAv2f64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
- "stqa\t$rT, $src", LoadStore,
- [(store (v2f64 VECREG:$rT), aform_addr:$src)]>;
-
- def STQAr128 : RI10Form<0b00100100, (outs), (ins GPRC:$rT, addr256k:$src),
- "stqa\t$rT, $src", LoadStore,
- [(store GPRC:$rT, aform_addr:$src)]>;
-
- def STQAr64 : RI10Form<0b00100100, (outs), (ins R64C:$rT, addr256k:$src),
- "stqa\t$rT, $src", LoadStore,
- [(store R64C:$rT, aform_addr:$src)]>;
-
- def STQAr32 : RI10Form<0b00100100, (outs), (ins R32C:$rT, addr256k:$src),
- "stqa\t$rT, $src", LoadStore,
- [(store R32C:$rT, aform_addr:$src)]>;
-
- // Floating Point
- def STQAf32 : RI10Form<0b00100100, (outs), (ins R32FP:$rT, addr256k:$src),
- "stqa\t$rT, $src", LoadStore,
- [(store R32FP:$rT, aform_addr:$src)]>;
-
- def STQAf64 : RI10Form<0b00100100, (outs), (ins R64FP:$rT, addr256k:$src),
- "stqa\t$rT, $src", LoadStore,
- [(store R64FP:$rT, aform_addr:$src)]>;
-
- def STQXv16i8 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
- "stqx\t$rT, $src", LoadStore,
- [(store (v16i8 VECREG:$rT), xform_addr:$src)]>;
-
- def STQXv8i16 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
- "stqx\t$rT, $src", LoadStore,
- [(store (v8i16 VECREG:$rT), xform_addr:$src)]>;
-
- def STQXv4i32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
- "stqx\t$rT, $src", LoadStore,
- [(store (v4i32 VECREG:$rT), xform_addr:$src)]>;
-
- def STQXv2i64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
- "stqx\t$rT, $src", LoadStore,
- [(store (v2i64 VECREG:$rT), xform_addr:$src)]>;
-
- def STQXv4f32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
- "stqx\t$rT, $src", LoadStore,
- [(store (v4f32 VECREG:$rT), xform_addr:$src)]>;
-
- def STQXv2f64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
- "stqx\t$rT, $src", LoadStore,
- [(store (v2f64 VECREG:$rT), xform_addr:$src)]>;
-
- def STQXr128 : RI10Form<0b00100100, (outs), (ins GPRC:$rT, memrr:$src),
- "stqx\t$rT, $src", LoadStore,
- [(store GPRC:$rT, xform_addr:$src)]>;
-
- def STQXr64 : RI10Form<0b00100100, (outs), (ins R64C:$rT, memrr:$src),
- "stqx\t$rT, $src", LoadStore,
- [(store R64C:$rT, xform_addr:$src)]>;
-
- def STQXr32 : RI10Form<0b00100100, (outs), (ins R32C:$rT, memrr:$src),
- "stqx\t$rT, $src", LoadStore,
- [(store R32C:$rT, xform_addr:$src)]>;
-
- // Floating Point
- def STQXf32 : RI10Form<0b00100100, (outs), (ins R32FP:$rT, memrr:$src),
- "stqx\t$rT, $src", LoadStore,
- [(store R32FP:$rT, xform_addr:$src)]>;
+class StoreAFormVec<ValueType vectype>
+ : RI16Form<0b0010010, (outs), (ins VECREG:$rT, addr256k:$src),
+ "stqa\t$rT, $src",
+ LoadStore,
+ [(store (vectype VECREG:$rT), aform_addr:$src)]>
+{ }
+
+class StoreAForm<RegisterClass rclass>
+ : RI16Form<0b001001, (outs), (ins rclass:$rT, addr256k:$src),
+ "stqa\t$rT, $src",
+ LoadStore,
+ [(store rclass:$rT, aform_addr:$src)]>
+{ }
+
+multiclass StoreAForms
+{
+ def v16i8: StoreAFormVec<v16i8>;
+ def v8i16: StoreAFormVec<v8i16>;
+ def v4i32: StoreAFormVec<v4i32>;
+ def v2i64: StoreAFormVec<v2i64>;
+ def v4f32: StoreAFormVec<v4f32>;
+ def v2f64: StoreAFormVec<v2f64>;
+
+ def r128: StoreAForm<GPRC>;
+ def r64: StoreAForm<R64C>;
+ def r32: StoreAForm<R32C>;
+ def f32: StoreAForm<R32FP>;
+ def f64: StoreAForm<R64FP>;
+ def r16: StoreAForm<R16C>;
+ def r8: StoreAForm<R8C>;
+}
- def STQXf64 : RI10Form<0b00100100, (outs), (ins R64FP:$rT, memrr:$src),
- "stqx\t$rT, $src", LoadStore,
- [(store R64FP:$rT, xform_addr:$src)]>;
+class StoreXFormVec<ValueType vectype>
+ : RRForm<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
+ "stqx\t$rT, $src",
+ LoadStore,
+ [(store (vectype VECREG:$rT), xform_addr:$src)]>
+{ }
+
+class StoreXForm<RegisterClass rclass>
+ : RRForm<0b00100100, (outs), (ins rclass:$rT, memrr:$src),
+ "stqx\t$rT, $src",
+ LoadStore,
+ [(store rclass:$rT, xform_addr:$src)]>
+{ }
+
+multiclass StoreXForms
+{
+ def v16i8: StoreXFormVec<v16i8>;
+ def v8i16: StoreXFormVec<v8i16>;
+ def v4i32: StoreXFormVec<v4i32>;
+ def v2i64: StoreXFormVec<v2i64>;
+ def v4f32: StoreXFormVec<v4f32>;
+ def v2f64: StoreXFormVec<v2f64>;
+
+ def r128: StoreXForm<GPRC>;
+ def r64: StoreXForm<R64C>;
+ def r32: StoreXForm<R32C>;
+ def f32: StoreXForm<R32FP>;
+ def f64: StoreXForm<R64FP>;
+ def r16: StoreXForm<R16C>;
+ def r8: StoreXForm<R8C>;
+}
- def STQXr16 : RI10Form<0b00100100, (outs), (ins R16C:$rT, memrr:$src),
- "stqx\t$rT, $src", LoadStore,
- [(store R16C:$rT, xform_addr:$src)]>;
+defm STQD : StoreDForms;
+defm STQA : StoreAForms;
+defm STQX : StoreXForms;
/* Store quadword, PC relative: Not much use at this point in time. Might
be useful for relocatable code.
- def STQR : RI16Form<0b111000100, (outs), (ins VECREG:$rT, s16imm:$disp),
- "stqr\t$rT, $disp", LoadStore,
- [(store VECREG:$rT, iaddr:$disp)]>;
- */
-}
+def STQR : RI16Form<0b111000100, (outs), (ins VECREG:$rT, s16imm:$disp),
+ "stqr\t$rT, $disp", LoadStore,
+ [(store VECREG:$rT, iaddr:$disp)]>;
+*/
//===----------------------------------------------------------------------===//
// Generate Controls for Insertion:
"ilh\t$rT, $val", ImmLoad,
[(set R16C:$rT, immSExt16:$val)]>;
+// Cell SPU doesn't have a native 8-bit immediate load, but ILH works when
+// given the right constant.
+def ILHr8:
+ RI16Form<0b110000010, (outs R8C:$rT), (ins s16imm_i8:$val),
+ "ilh\t$rT, $val", ImmLoad,
+ [(set R8C:$rT, immSExt8:$val)]>;
+
// IL does sign extension!
def ILr64:
RI16Form<0b100000010, (outs R64C:$rT), (ins s16imm_i64:$val),
RegConstraint<"$rS = $rT">,
NoEncode<"$rS">;
+def IOHLlo:
+ RI16Form<0b100000110, (outs R32C:$rT), (ins R32C:$rS, symbolLo:$val),
+ "iohl\t$rT, $val", ImmLoad,
+ [/* no pattern */]>,
+ RegConstraint<"$rS = $rT">,
+ NoEncode<"$rS">;
+
// Form select mask for bytes using immediate, used in conjunction with the
// SELB instruction:
-def FSMBIv16i8 : RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
- "fsmbi\t$rT, $val", SelectOp,
- [(set (v16i8 VECREG:$rT), (SPUfsmbi_v16i8 immU16:$val))]>;
-
-def FSMBIv8i16 : RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
- "fsmbi\t$rT, $val", SelectOp,
- [(set (v8i16 VECREG:$rT), (SPUfsmbi_v8i16 immU16:$val))]>;
+class FSMBIVec<ValueType vectype>
+ : RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
+ "fsmbi\t$rT, $val",
+ SelectOp,
+ [(set (vectype VECREG:$rT), (SPUfsmbi immU16:$val))]>
+{ }
+
+multiclass FSMBIs
+{
+ def v16i8: FSMBIVec<v16i8>;
+ def v8i16: FSMBIVec<v8i16>;
+ def v4i32: FSMBIVec<v4i32>;
+ def v2i64: FSMBIVec<v2i64>;
+}
-def FSMBIvecv4i32 : RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
- "fsmbi\t$rT, $val", SelectOp,
- [(set (v4i32 VECREG:$rT), (SPUfsmbi_v4i32 immU16:$val))]>;
+defm FSMBI : FSMBIs;
//===----------------------------------------------------------------------===//
// Integer and Logical Operations:
"a\t$rT, $rA, $rB", IntegerOp,
[(set R32C:$rT, (add R32C:$rA, R32C:$rB))]>;
+def Ar8:
+ RRForm<0b00000011000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
+ "a\t$rT, $rA, $rB", IntegerOp,
+ [(set R8C:$rT, (add R8C:$rA, R8C:$rB))]>;
+
def AIvec:
RI10Form<0b00111000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
"ai\t$rT, $rA, $val", IntegerOp,
[(set (v4i32 VECREG:$rT), (add (v4i32 VECREG:$rA),
v4i32SExt10Imm:$val))]>;
-def AIr32 : RI10Form<0b00111000, (outs R32C:$rT),
- (ins R32C:$rA, s10imm_i32:$val),
- "ai\t$rT, $rA, $val", IntegerOp,
- [(set R32C:$rT, (add R32C:$rA, i32ImmSExt10:$val))]>;
+def AIr32:
+ RI10Form<0b00111000, (outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val),
+ "ai\t$rT, $rA, $val", IntegerOp,
+ [(set R32C:$rT, (add R32C:$rA, i32ImmSExt10:$val))]>;
-def SFHvec : RRForm<0b00010010000, (outs VECREG:$rT),
- (ins VECREG:$rA, VECREG:$rB),
- "sfh\t$rT, $rA, $rB", IntegerOp,
- [(set (v8i16 VECREG:$rT), (sub (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)))]>;
+def SFHvec:
+ RRForm<0b00010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+ "sfh\t$rT, $rA, $rB", IntegerOp,
+ [(set (v8i16 VECREG:$rT), (sub (v8i16 VECREG:$rA),
+ (v8i16 VECREG:$rB)))]>;
-def SFHr16 : RRForm<0b00010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
- "sfh\t$rT, $rA, $rB", IntegerOp,
- [(set R16C:$rT, (sub R16C:$rA, R16C:$rB))]>;
+def SFHr16:
+ RRForm<0b00010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
+ "sfh\t$rT, $rA, $rB", IntegerOp,
+ [(set R16C:$rT, (sub R16C:$rA, R16C:$rB))]>;
def SFHIvec:
RI10Form<0b10110000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
"xsbh\t$rDst, $rSrc", IntegerOp,
[(set R16C:$rDst, (sext_inreg R16C:$rSrc, i8))]>;
+def XSBHr8:
+ RRForm_1<0b01101101010, (outs R16C:$rDst), (ins R8C:$rSrc),
+ "xsbh\t$rDst, $rSrc", IntegerOp,
+ [(set R16C:$rDst, (sext R8C:$rSrc))]>;
+
// 32-bit form for XSBH: used to sign extend 8-bit quantities to 16-bit
// quantities to 32-bit quantities via a 32-bit register (see the sext 8->32
// pattern below). Intentionally doesn't match a pattern because we want the
"and\t$rT, $rA, $rB", IntegerOp,
[(set R16C:$rT, (and R16C:$rA, R16C:$rB))]>;
+def ANDr8:
+ RRForm<0b10000011000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
+ "and\t$rT, $rA, $rB", IntegerOp,
+ [(set R8C:$rT, (and R8C:$rA, R8C:$rB))]>;
+
// Hacked form of AND to zero-extend 16-bit quantities to 32-bit
// quantities -- see 16->32 zext pattern.
//
"andc\t$rT, $rA, $rB", IntegerOp,
[(set R16C:$rT, (and R16C:$rA, (not R16C:$rB)))]>;
+def ANDCr8:
+ RRForm<0b10000011010, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
+ "andc\t$rT, $rA, $rB", IntegerOp,
+ [(set R8C:$rT, (and R8C:$rA, (not R8C:$rB)))]>;
+
def ANDBIv16i8:
RI10Form<0b01101000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
"andbi\t$rT, $rA, $val", IntegerOp,
[(set (v16i8 VECREG:$rT),
(and (v16i8 VECREG:$rA), (v16i8 v16i8U8Imm:$val)))]>;
+def ANDBIr8:
+ RI10Form<0b01101000, (outs R8C:$rT), (ins R8C:$rA, u10imm_i8:$val),
+ "andbi\t$rT, $rA, $val", IntegerOp,
+ [(set R8C:$rT, (and R8C:$rA, immU8:$val))]>;
+
def ANDHIv8i16:
RI10Form<0b10101000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
"andhi\t$rT, $rA, $val", IntegerOp,
def ANDHIr16:
RI10Form<0b10101000, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
"andhi\t$rT, $rA, $val", IntegerOp,
- [(set R16C:$rT, (and R16C:$rA, i16ImmSExt10:$val))]>;
+ [(set R16C:$rT, (and R16C:$rA, i16ImmUns10:$val))]>;
+
+def ANDHI1To2:
+ RI10Form<0b10101000, (outs R16C:$rT), (ins R8C:$rA, s10imm:$val),
+ "andhi\t$rT, $rA, $val", IntegerOp,
+ [(set R16C:$rT, (and (zext R8C:$rA), i16ImmSExt10:$val))]>;
def ANDIv4i32:
RI10Form<0b00101000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
"andi\t$rT, $rA, $val", IntegerOp,
[(set R32C:$rT, (and R32C:$rA, i32ImmSExt10:$val))]>;
+// Hacked form of ANDI to zero-extend i8 quantities to i32. See the zext 8->32
+// pattern below.
+def ANDI1To4:
+ RI10Form<0b10101000, (outs R32C:$rT), (ins R8C:$rA, s10imm_i32:$val),
+ "andi\t$rT, $rA, $val", IntegerOp,
+ [(set R32C:$rT, (and (zext R8C:$rA), i32ImmSExt10:$val))]>;
+
// Hacked form of ANDI to zero-extend i16 quantities to i32. See the
// zext 16->32 pattern below.
//
"or\t$rT, $rA, $rB", IntegerOp,
[(set R16C:$rT, (or R16C:$rA, R16C:$rB))]>;
+def ORr8:
+ RRForm<0b10000010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
+ "or\t$rT, $rA, $rB", IntegerOp,
+ [(set R8C:$rT, (or R8C:$rA, R8C:$rB))]>;
+
+// OR instruction forms that are used to copy f32 and f64 registers.
+// They do not match patterns.
+def ORf32:
+ RRForm<0b10000010000, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB),
+ "or\t$rT, $rA, $rB", IntegerOp,
+ [/* no pattern */]>;
+
+def ORf64:
+ RRForm<0b10000010000, (outs R64FP:$rT), (ins R64FP:$rA, R64FP:$rB),
+ "or\t$rT, $rA, $rB", IntegerOp,
+ [/* no pattern */]>;
+
// ORv*_*: Used in scalar->vector promotions:
+def ORv16i8_i8:
+ RRForm<0b10000010000, (outs VECREG:$rT), (ins R8C:$rA, R8C:$rB),
+ "or\t$rT, $rA, $rB", IntegerOp,
+ [/* no pattern */]>;
+
+def : Pat<(v16i8 (SPUpromote_scalar R8C:$rA)),
+ (ORv16i8_i8 R8C:$rA, R8C:$rA)>;
+
def ORv8i16_i16:
RRForm<0b10000010000, (outs VECREG:$rT), (ins R16C:$rA, R16C:$rB),
"or\t$rT, $rA, $rB", IntegerOp,
(ORv2f64_f64 R64FP:$rA, R64FP:$rA)>;
// ORi*_v*: Used to extract vector element 0 (the preferred slot)
+def ORi8_v16i8:
+ RRForm<0b10000010000, (outs R8C:$rT), (ins VECREG:$rA, VECREG:$rB),
+ "or\t$rT, $rA, $rB", IntegerOp,
+ [/* no pattern */]>;
+
+def : Pat<(SPUextract_elt0 (v16i8 VECREG:$rA)),
+ (ORi8_v16i8 VECREG:$rA, VECREG:$rA)>;
+
+def : Pat<(SPUextract_elt0_chained (v16i8 VECREG:$rA)),
+ (ORi8_v16i8 VECREG:$rA, VECREG:$rA)>;
+
def ORi16_v8i16:
RRForm<0b10000010000, (outs R16C:$rT), (ins VECREG:$rA, VECREG:$rB),
"or\t$rT, $rA, $rB", IntegerOp,
"orc\t$rT, $rA, $rB", IntegerOp,
[(set R16C:$rT, (or R16C:$rA, (not R16C:$rB)))]>;
+def ORCr8:
+ RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
+ "orc\t$rT, $rA, $rB", IntegerOp,
+ [(set R8C:$rT, (or R8C:$rA, (not R8C:$rB)))]>;
+
// OR byte immediate
def ORBIv16i8:
RI10Form<0b01100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
[(set (v16i8 VECREG:$rT),
(or (v16i8 VECREG:$rA), (v16i8 v16i8U8Imm:$val)))]>;
+def ORBIr8:
+ RI10Form<0b01100000, (outs R8C:$rT), (ins R8C:$rA, u10imm_i8:$val),
+ "orbi\t$rT, $rA, $val", IntegerOp,
+ [(set R8C:$rT, (or R8C:$rA, immU8:$val))]>;
+
// OR halfword immediate
def ORHIv8i16:
- RI10Form<0b10100000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
+ RI10Form<0b10100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
"orhi\t$rT, $rA, $val", IntegerOp,
[(set (v8i16 VECREG:$rT), (or (v8i16 VECREG:$rA),
- v8i16SExt10Imm:$val))]>;
+ v8i16Uns10Imm:$val))]>;
def ORHIr16:
- RI10Form<0b10100000, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
+ RI10Form<0b10100000, (outs R16C:$rT), (ins R16C:$rA, u10imm:$val),
"orhi\t$rT, $rA, $val", IntegerOp,
- [(set R16C:$rT, (or R16C:$rA, i16ImmSExt10:$val))]>;
+ [(set R16C:$rT, (or R16C:$rA, i16ImmUns10:$val))]>;
+
+// Hacked form of ORHI used to promote 8-bit registers to 16-bit
+def ORHI1To2:
+ RI10Form<0b10100000, (outs R16C:$rT), (ins R8C:$rA, s10imm:$val),
+ "orhi\t$rT, $rA, $val", IntegerOp,
+ [(set R16C:$rT, (or (anyext R8C:$rA), i16ImmSExt10:$val))]>;
// Bitwise "or" with immediate
def ORIv4i32:
- RI10Form<0b00100000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
+ RI10Form<0b00100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
"ori\t$rT, $rA, $val", IntegerOp,
[(set (v4i32 VECREG:$rT), (or (v4i32 VECREG:$rA),
- v4i32SExt10Imm:$val))]>;
+ v4i32Uns10Imm:$val))]>;
def ORIr32:
- RI10Form<0b00100000, (outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val),
- "ori\t$rT, $rA, $val", IntegerOp,
- [(set R32C:$rT, (or R32C:$rA, i32ImmSExt10:$val))]>;
-
-// Hacked forms of or immediate to copy one 32- and 64-bit FP register
-// to another. Do not match patterns.
-def ORIf32:
- RI10Form_1<0b00100000, (outs R32FP:$rT), (ins R32FP:$rA, s10imm_i32:$val),
- "ori\t$rT, $rA, $val", IntegerOp,
- [/* no pattern */]>;
-
-def ORIf64:
- RI10Form_1<0b00100000, (outs R64FP:$rT), (ins R64FP:$rA, s10imm_i32:$val),
+ RI10Form<0b00100000, (outs R32C:$rT), (ins R32C:$rA, u10imm_i32:$val),
"ori\t$rT, $rA, $val", IntegerOp,
- [/* no pattern */]>;
+ [(set R32C:$rT, (or R32C:$rA, i32ImmUns10:$val))]>;
def ORIr64:
RI10Form_1<0b00100000, (outs R64C:$rT), (ins R64C:$rA, s10imm_i32:$val),
"ori\t$rT, $rA, $val", IntegerOp,
[(set R32C:$rT, (or (anyext R16C:$rA), i32ImmSExt10:$val))]>;
+// ORI1To4: Hacked version of the ORI instruction to extend 8-bit quantities
+// to 32-bit quantities. Used exclusively to match "anyext" conversions (vide
+// infra "anyext 8->32" pattern.)
+def ORI1To4:
+ RI10Form<0b00100000, (outs R32C:$rT), (ins R8C:$rA, s10imm_i32:$val),
+ "ori\t$rT, $rA, $val", IntegerOp,
+ [(set R32C:$rT, (or (anyext R8C:$rA), i32ImmSExt10:$val))]>;
+
// ORX: "or" across the vector: or's $rA's word slots leaving the result in
// $rT[0], slots 1-3 are zeroed.
//
-// Needs to match an intrinsic pattern.
+// FIXME: Needs to match an intrinsic pattern.
def ORXv4i32:
RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
"orx\t$rT, $rA, $rB", IntegerOp,
[]>;
+// XOR:
def XORv16i8:
RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
"xor\t$rT, $rA, $rB", IntegerOp,
"xor\t$rT, $rA, $rB", IntegerOp,
[(set R16C:$rT, (xor R16C:$rA, R16C:$rB))]>;
+def XORr8:
+ RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
+ "xor\t$rT, $rA, $rB", IntegerOp,
+ [(set R8C:$rT, (xor R8C:$rA, R8C:$rB))]>;
+
def XORBIv16i8:
RI10Form<0b01100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
"xorbi\t$rT, $rA, $val", IntegerOp,
[(set (v16i8 VECREG:$rT), (xor (v16i8 VECREG:$rA), v16i8U8Imm:$val))]>;
+def XORBIr8:
+ RI10Form<0b01100000, (outs R8C:$rT), (ins R8C:$rA, u10imm_i8:$val),
+ "xorbi\t$rT, $rA, $val", IntegerOp,
+ [(set R8C:$rT, (xor R8C:$rA, immU8:$val))]>;
+
def XORHIv8i16:
RI10Form<0b10100000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
"xorhi\t$rT, $rA, $val", IntegerOp,
"nand\t$rT, $rA, $rB", IntegerOp,
[(set R16C:$rT, (not (and R16C:$rA, R16C:$rB)))]>;
+def NANDr8:
+ RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
+ "nand\t$rT, $rA, $rB", IntegerOp,
+ [(set R8C:$rT, (not (and R8C:$rA, R8C:$rB)))]>;
+
// NOR:
def NORv16i8:
RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
"nor\t$rT, $rA, $rB", IntegerOp,
[(set R16C:$rT, (not (or R16C:$rA, R16C:$rB)))]>;
+def NORr8:
+ RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
+ "nor\t$rT, $rA, $rB", IntegerOp,
+ [(set R8C:$rT, (not (or R8C:$rA, R8C:$rB)))]>;
+
// EQV: Equivalence (1 for each same bit, otherwise 0)
def EQVv16i8:
RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
def : Pat<(xor (not R16C:$rA), R16C:$rB),
(EQVr16 R16C:$rA, R16C:$rB)>;
+def EQVr8:
+ RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
+ "eqv\t$rT, $rA, $rB", IntegerOp,
+ [(set R8C:$rT, (or (and R8C:$rA, R8C:$rB),
+ (and (not R8C:$rA), (not R8C:$rB))))]>;
+
+def : Pat<(xor R8C:$rA, (not R8C:$rB)),
+ (EQVr8 R8C:$rA, R8C:$rB)>;
+
+def : Pat<(xor (not R8C:$rA), R8C:$rB),
+ (EQVr8 R8C:$rA, R8C:$rB)>;
+
// gcc optimizes (p & q) | (~p & ~q) -> ~(p | q) | (p & q), so match that
// pattern also:
def : Pat<(or (vnot (or (v16i8 VECREG:$rA), (v16i8 VECREG:$rB))),
def : Pat<(or (not (or R16C:$rA, R16C:$rB)), (and R16C:$rA, R16C:$rB)),
(EQVr16 R16C:$rA, R16C:$rB)>;
+def : Pat<(or (not (or R8C:$rA, R8C:$rB)), (and R8C:$rA, R8C:$rB)),
+ (EQVr8 R8C:$rA, R8C:$rB)>;
+
// Select bits:
def SELBv16i8:
RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
def : Pat<(or (and (not R16C:$rC), R16C:$rA),
(and R16C:$rC, R16C:$rB)),
(SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;
+
+def SELBr8:
+ RRRForm<0b1000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB, R8C:$rC),
+ "selb\t$rT, $rA, $rB, $rC", IntegerOp,
+ []>;
+
+def : Pat<(or (and R8C:$rA, R8C:$rC),
+ (and R8C:$rB, (not R8C:$rC))),
+ (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
+
+def : Pat<(or (and R8C:$rC, R8C:$rA),
+ (and R8C:$rB, (not R8C:$rC))),
+ (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
+
+def : Pat<(or (and R8C:$rA, R8C:$rC),
+ (and (not R8C:$rC), R8C:$rB)),
+ (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
+
+def : Pat<(or (and R8C:$rC, R8C:$rA),
+ (and (not R8C:$rC), R8C:$rB)),
+ (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
+
+def : Pat<(or (and R8C:$rA, (not R8C:$rC)),
+ (and R8C:$rB, R8C:$rC)),
+ (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
+
+def : Pat<(or (and R8C:$rA, (not R8C:$rC)),
+ (and R8C:$rC, R8C:$rB)),
+ (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
+
+def : Pat<(or (and (not R8C:$rC), R8C:$rA),
+ (and R8C:$rB, R8C:$rC)),
+ (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
+
+def : Pat<(or (and (not R8C:$rC), R8C:$rA),
+ (and R8C:$rC, R8C:$rB)),
+ (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
//===----------------------------------------------------------------------===//
// Vector shuffle...
def SHUFB:
RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
"shufb\t$rT, $rA, $rB, $rC", IntegerOp,
- [/* insert intrinsic here */]>;
+ [/* no pattern */]>;
// SPUshuffle is generated in LowerVECTOR_SHUFFLE and gets replaced with SHUFB.
// See the SPUshuffle SDNode operand above, which sets up the DAG pattern
def : Pat<(SPUshuffle (v4i32 VECREG:$rA), (v4i32 VECREG:$rB), VECREG:$rC),
(SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
+def : Pat<(SPUshuffle (v4f32 VECREG:$rA), (v4f32 VECREG:$rB), VECREG:$rC),
+ (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
+
def : Pat<(SPUshuffle (v2i64 VECREG:$rA), (v2i64 VECREG:$rB), VECREG:$rC),
(SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
+def : Pat<(SPUshuffle (v2f64 VECREG:$rA), (v2f64 VECREG:$rB), VECREG:$rC),
+ (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
+
//===----------------------------------------------------------------------===//
// Shift and rotate group:
//===----------------------------------------------------------------------===//
[(set R16C:$rT, (shl R16C:$rA, R32C:$rB))]>;
def SHLHIv8i16:
- RI7Form<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
+ RI7Form<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i8:$val),
"shlhi\t$rT, $rA, $val", RotateShift,
[(set (v8i16 VECREG:$rT),
- (SPUvec_shl_v8i16 (v8i16 VECREG:$rA), (i16 uimm7:$val)))]>;
+ (SPUvec_shl_v8i16 (v8i16 VECREG:$rA), (i8 uimm7:$val)))]>;
+
+def : Pat<(SPUvec_shl_v8i16 (v8i16 VECREG:$rA), (i16 uimm7:$val)),
+ (SHLHIv8i16 VECREG:$rA, imm:$val)>;
def : Pat<(SPUvec_shl_v8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val)),
(SHLHIv8i16 VECREG:$rA, imm:$val)>;
RI7Form<0b11111010000, (outs R16C:$rT), (ins R16C:$rA, u7imm_i32:$val),
"shlhi\t$rT, $rA, $val", RotateShift,
[(set R16C:$rT, (shl R16C:$rA, (i32 uimm7:$val)))]>;
+
+def : Pat<(shl R16C:$rA, (i8 uimm7:$val)),
+ (SHLHIr16 R16C:$rA, uimm7:$val)>;
def : Pat<(shl R16C:$rA, (i16 uimm7:$val)),
(SHLHIr16 R16C:$rA, uimm7:$val)>;
[(set R32C:$rT, (shl R32C:$rA, R32C:$rB))]>;
def SHLIv4i32:
- RI7Form<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
+ RI7Form<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i8:$val),
"shli\t$rT, $rA, $val", RotateShift,
[(set (v4i32 VECREG:$rT),
- (SPUvec_shl_v4i32 (v4i32 VECREG:$rA), (i16 uimm7:$val)))]>;
+ (SPUvec_shl_v4i32 (v4i32 VECREG:$rA), (i8 uimm7:$val)))]>;
+
+def: Pat<(SPUvec_shl_v4i32 (v4i32 VECREG:$rA), (i16 uimm7:$val)),
+ (SHLIv4i32 VECREG:$rA, uimm7:$val)>;
def: Pat<(SPUvec_shl_v4i32 (v4i32 VECREG:$rA), (i32 uimm7:$val)),
(SHLIv4i32 VECREG:$rA, uimm7:$val)>;
def : Pat<(shl R32C:$rA, (i16 uimm7:$val)),
(SHLIr32 R32C:$rA, uimm7:$val)>;
+def : Pat<(shl R32C:$rA, (i8 uimm7:$val)),
+ (SHLIr32 R32C:$rA, uimm7:$val)>;
+
// SHLQBI vec form: Note that this will shift the entire vector (the 128-bit
// register) to the left. Vector form is here to ensure type correctness.
def SHLQBIvec:
"roth\t$rT, $rA, $rB", RotateShift,
[(set R16C:$rT, (rotl R16C:$rA, R32C:$rB))]>;
+// The rotate amount is in the same bits whether we've got an 8-bit, 16-bit or
+// 32-bit register
+def ROTHr16_r8:
+ RRForm<0b00111010000, (outs R16C:$rT), (ins R16C:$rA, R8C:$rB),
+ "roth\t$rT, $rA, $rB", RotateShift,
+ [(set R16C:$rT, (rotl R16C:$rA, (i32 (zext R8C:$rB))))]>;
+
+def : Pat<(rotl R16C:$rA, (i32 (sext R8C:$rB))),
+ (ROTHr16_r8 R16C:$rA, R8C:$rB)>;
+
+def : Pat<(rotl R16C:$rA, (i32 (zext R8C:$rB))),
+ (ROTHr16_r8 R16C:$rA, R8C:$rB)>;
+
+def : Pat<(rotl R16C:$rA, (i32 (anyext R8C:$rB))),
+ (ROTHr16_r8 R16C:$rA, R8C:$rB)>;
+
def ROTHIv8i16:
- RI7Form<0b00111110000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
+ RI7Form<0b00111110000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i8:$val),
"rothi\t$rT, $rA, $val", RotateShift,
[(set (v8i16 VECREG:$rT),
- (SPUvec_rotl_v8i16 VECREG:$rA, (i16 uimm7:$val)))]>;
+ (SPUvec_rotl_v8i16 VECREG:$rA, (i8 uimm7:$val)))]>;
def : Pat<(SPUvec_rotl_v8i16 VECREG:$rA, (i16 uimm7:$val)),
(ROTHIv8i16 VECREG:$rA, imm:$val)>;
"rothi\t$rT, $rA, $val", RotateShift,
[(set R16C:$rT, (rotl R16C:$rA, (i32 uimm7:$val)))]>;
+def ROTHIr16_i8:
+ RI7Form<0b00111110000, (outs R16C:$rT), (ins R16C:$rA, u7imm_i8:$val),
+ "rothi\t$rT, $rA, $val", RotateShift,
+ [(set R16C:$rT, (rotl R16C:$rA, (i8 uimm7:$val)))]>;
+
def ROTv4i32:
RRForm<0b00011010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
"rot\t$rT, $rA, $rB", RotateShift,
"rot\t$rT, $rA, $rB", RotateShift,
[(set R32C:$rT, (rotl R32C:$rA, R32C:$rB))]>;
+// The rotate amount is in the same bits whether we've got an 8-bit, 16-bit or
+// 32-bit register
+def ROTr32_r16_anyext:
+ RRForm<0b00011010000, (outs R32C:$rT), (ins R32C:$rA, R16C:$rB),
+ "rot\t$rT, $rA, $rB", RotateShift,
+ [(set R32C:$rT, (rotl R32C:$rA, (i32 (anyext R16C:$rB))))]>;
+
+def : Pat<(rotl R32C:$rA, (i32 (zext R16C:$rB))),
+ (ROTr32_r16_anyext R32C:$rA, R16C:$rB)>;
+
+def : Pat<(rotl R32C:$rA, (i32 (sext R16C:$rB))),
+ (ROTr32_r16_anyext R32C:$rA, R16C:$rB)>;
+
+def ROTr32_r8_anyext:
+ RRForm<0b00011010000, (outs R32C:$rT), (ins R32C:$rA, R8C:$rB),
+ "rot\t$rT, $rA, $rB", RotateShift,
+ [(set R32C:$rT, (rotl R32C:$rA, (i32 (anyext R8C:$rB))))]>;
+
+def : Pat<(rotl R32C:$rA, (i32 (zext R8C:$rB))),
+ (ROTr32_r8_anyext R32C:$rA, R8C:$rB)>;
+
+def : Pat<(rotl R32C:$rA, (i32 (sext R8C:$rB))),
+ (ROTr32_r8_anyext R32C:$rA, R8C:$rB)>;
+
def ROTIv4i32:
RI7Form<0b00011110000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i32:$val),
"roti\t$rT, $rA, $val", RotateShift,
def : Pat<(SPUvec_rotl_v4i32 (v4i32 VECREG:$rA), (i16 uimm7:$val)),
(ROTIv4i32 VECREG:$rA, imm:$val)>;
+def : Pat<(SPUvec_rotl_v4i32 (v4i32 VECREG:$rA), (i8 uimm7:$val)),
+ (ROTIv4i32 VECREG:$rA, imm:$val)>;
+
def ROTIr32:
RI7Form<0b00011110000, (outs R32C:$rT), (ins R32C:$rA, u7imm_i32:$val),
"roti\t$rT, $rA, $val", RotateShift,
"roti\t$rT, $rA, $val", RotateShift,
[(set R32C:$rT, (rotl R32C:$rA, (i16 uimm7:$val)))]>;
+def ROTIr32_i8:
+ RI7Form<0b00011110000, (outs R32C:$rT), (ins R32C:$rA, u7imm_i8:$val),
+ "roti\t$rT, $rA, $val", RotateShift,
+ [(set R32C:$rT, (rotl R32C:$rA, (i8 uimm7:$val)))]>;
+
// ROTQBY* vector forms: This rotates the entire vector, but vector registers
// are used here for type checking (instances where ROTQBI is used actually
// use vector registers)
def ROTQBYvec:
- RRForm<0b00111011100, (outs VECREG:$rT), (ins VECREG:$rA, R16C:$rB),
+ RRForm<0b00111011100, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
"rotqby\t$rT, $rA, $rB", RotateShift,
- [(set (v16i8 VECREG:$rT), (SPUrotbytes_left (v16i8 VECREG:$rA), R16C:$rB))]>;
+ [(set (v16i8 VECREG:$rT), (SPUrotbytes_left (v16i8 VECREG:$rA), R32C:$rB))]>;
-def : Pat<(SPUrotbytes_left_chained (v16i8 VECREG:$rA), R16C:$rB),
- (ROTQBYvec VECREG:$rA, R16C:$rB)>;
+def : Pat<(SPUrotbytes_left_chained (v16i8 VECREG:$rA), R32C:$rB),
+ (ROTQBYvec VECREG:$rA, R32C:$rB)>;
// See ROTQBY note above.
def ROTQBYIvec:
(ROTHMv8i16 VECREG:$rA,
(SFIr32 (XSHWr16 R16C:$rB), 0))>;
-def : Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), /* R8C */ R16C:$rB),
+def : Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), R8C:$rB),
(ROTHMv8i16 VECREG:$rA,
- (SFIr32 (XSHWr16 /* (XSBHr8 R8C */ R16C:$rB) /*)*/, 0))>;
+ (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
// ROTHM r16 form: Rotate 16-bit quantity to right, zero fill at the left
// Note: This instruction doesn't match a pattern because rB must be negated
(ROTHMr16 R16C:$rA,
(SFIr32 (XSHWr16 R16C:$rB), 0))>;
-def : Pat<(srl R16C:$rA, /* R8C */ R16C:$rB),
+def : Pat<(srl R16C:$rA, R8C:$rB),
(ROTHMr16 R16C:$rA,
- (SFIr32 (XSHWr16 /* (XSBHr8 R8C */ R16C:$rB) /* ) */, 0))>;
+ (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
// ROTHMI v8i16 form: See the comment for ROTHM v8i16. The difference here is
// that the immediate can be complemented, so that the user doesn't have to
def: Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), (i16 imm:$val)),
(ROTHMIv8i16 VECREG:$rA, imm:$val)>;
+
+def: Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), (i8 imm:$val)),
+ (ROTHMIv8i16 VECREG:$rA, imm:$val)>;
def ROTHMIr16:
RI7Form<0b10111110000, (outs R16C:$rT), (ins R16C:$rA, rothNeg7imm:$val),
def: Pat<(srl R16C:$rA, (i16 uimm7:$val)),
(ROTHMIr16 R16C:$rA, uimm7:$val)>;
+def: Pat<(srl R16C:$rA, (i8 uimm7:$val)),
+ (ROTHMIr16 R16C:$rA, uimm7:$val)>;
+
// ROTM v4i32 form: See the ROTHM v8i16 comments.
def ROTMv4i32:
RRForm<0b10011010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
(ROTMr32 R32C:$rA,
(SFIr32 (XSHWr16 R16C:$rB), 0))>;
+def : Pat<(srl R32C:$rA, R8C:$rB),
+ (ROTMr32 R32C:$rA,
+ (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
+
// ROTMI v4i32 form: See the comment for ROTHM v8i16.
def ROTMIv4i32:
RI7Form<0b10011110000, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, (i16 uimm7:$val)),
(ROTMIv4i32 VECREG:$rA, uimm7:$val)>;
+
+def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, (i8 uimm7:$val)),
+ (ROTMIv4i32 VECREG:$rA, uimm7:$val)>;
// ROTMI r32 form: know how to complement the immediate value.
def ROTMIr32:
def : Pat<(srl R32C:$rA, (i16 imm:$val)),
(ROTMIr32 R32C:$rA, uimm7:$val)>;
+def : Pat<(srl R32C:$rA, (i8 imm:$val)),
+ (ROTMIr32 R32C:$rA, uimm7:$val)>;
+
// ROTQMBYvec: This is a vector form merely so that when used in an
// instruction pattern, type checking will succeed. This instruction assumes
// that the user knew to complement $rB.
(ROTMAHv8i16 VECREG:$rA,
(SFIr32 (XSHWr16 R16C:$rB), 0))>;
+def : Pat<(SPUvec_sra_v8i16 VECREG:$rA, R8C:$rB),
+ (ROTMAHv8i16 VECREG:$rA,
+ (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
+
def ROTMAHr16:
RRForm<0b01111010000, (outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
"rotmah\t$rT, $rA, $rB", RotateShift,
(ROTMAHr16 R16C:$rA,
(SFIr32 (XSHWr16 R16C:$rB), 0))>;
+def : Pat<(sra R16C:$rA, R8C:$rB),
+ (ROTMAHr16 R16C:$rA,
+ (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
+
def ROTMAHIv8i16:
RRForm<0b01111110000, (outs VECREG:$rT), (ins VECREG:$rA, rothNeg7imm:$val),
"rotmahi\t$rT, $rA, $val", RotateShift,
def : Pat<(SPUvec_sra_v8i16 (v8i16 VECREG:$rA), (i16 uimm7:$val)),
(ROTMAHIv8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val))>;
+def : Pat<(SPUvec_sra_v8i16 (v8i16 VECREG:$rA), (i8 uimm7:$val)),
+ (ROTMAHIv8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val))>;
+
def ROTMAHIr16:
RRForm<0b01111110000, (outs R16C:$rT), (ins R16C:$rA, rothNeg7imm_i16:$val),
"rotmahi\t$rT, $rA, $val", RotateShift,
def : Pat<(sra R16C:$rA, (i32 imm:$val)),
(ROTMAHIr16 R16C:$rA, uimm7:$val)>;
+def : Pat<(sra R16C:$rA, (i8 imm:$val)),
+ (ROTMAHIr16 R16C:$rA, uimm7:$val)>;
+
def ROTMAv4i32:
RRForm<0b01011010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
"rotma\t$rT, $rA, $rB", RotateShift,
(ROTMAv4i32 (v4i32 VECREG:$rA),
(SFIr32 (XSHWr16 R16C:$rB), 0))>;
+def : Pat<(SPUvec_sra_v4i32 VECREG:$rA, R8C:$rB),
+ (ROTMAv4i32 (v4i32 VECREG:$rA),
+ (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
+
def ROTMAr32:
RRForm<0b01011010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
"rotma\t$rT, $rA, $rB", RotateShift,
(ROTMAr32 R32C:$rA,
(SFIr32 (XSHWr16 R16C:$rB), 0))>;
+def : Pat<(sra R32C:$rA, R8C:$rB),
+ (ROTMAr32 R32C:$rA,
+ (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
+
def ROTMAIv4i32:
RRForm<0b01011110000, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
"rotmai\t$rT, $rA, $val", RotateShift,
def : Pat<(sra R32C:$rA, (i16 uimm7:$val)),
(ROTMAIr32 R32C:$rA, uimm7:$val)>;
+def : Pat<(sra R32C:$rA, (i8 uimm7:$val)),
+ (ROTMAIr32 R32C:$rA, uimm7:$val)>;
+
//===----------------------------------------------------------------------===//
// Branch and conditionals:
//===----------------------------------------------------------------------===//
}
// Comparison operators:
+def CEQBr8:
+ RRForm<0b00001011110, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
+ "ceqb\t$rT, $rA, $rB", ByteOp,
+ [/* no pattern to match */]>;
def CEQBv16i8:
RRForm<0b00001011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
"ceqb\t$rT, $rA, $rB", ByteOp,
[/* no pattern to match: intrinsic */]>;
+def CEQBIr8:
+ RI10Form<0b01111110, (outs R8C:$rT), (ins R8C:$rA, s7imm_i8:$val),
+ "ceqbi\t$rT, $rA, $val", ByteOp,
+ [/* no pattern to match: intrinsic */]>;
+
def CEQBIv16i8:
- RI10Form<0b01111110, (outs VECREG:$rT), (ins VECREG:$rA, s7imm:$val),
+ RI10Form<0b01111110, (outs VECREG:$rT), (ins VECREG:$rA, s7imm_i8:$val),
"ceqbi\t$rT, $rA, $val", ByteOp,
[/* no pattern to match: intrinsic */]>;
def CEQr32:
RRForm<0b00000011110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
"ceq\t$rT, $rA, $rB", ByteOp,
- [/* no pattern to match: intrinsic */]>;
+ [(set R32C:$rT, (seteq R32C:$rA, R32C:$rB))]>;
def CEQv4i32:
RRForm<0b00000011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
"ceq\t$rT, $rA, $rB", ByteOp,
- [/* no pattern to match: intrinsic */]>;
+ [(set (v4i32 VECREG:$rT), (seteq (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;
def CEQIr32:
- RI10Form<0b00111110, (outs R32C:$rT), (ins R32C:$rA, s10imm:$val),
+ RI10Form<0b00111110, (outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val),
"ceqi\t$rT, $rA, $val", ByteOp,
- [/* no pattern to match: intrinsic */]>;
+ [(set R32C:$rT, (seteq R32C:$rA, i32ImmSExt10:$val))]>;
def CEQIv4i32:
RI10Form<0b00111110, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
def BRASL:
BranchSetLink<0b011001100, (outs), (ins calltarget:$func, variable_ops),
"brasl\t$$lr, $func",
- [(SPUcall tglobaladdr:$func)]>;
+ [(SPUcall (SPUaform tglobaladdr:$func, 0))]>;
// Branch indirect and set link if external data. These instructions are not
// actually generated, matched by an intrinsic:
*/
}
+//===----------------------------------------------------------------------===//
+// setcc and brcond patterns:
+//===----------------------------------------------------------------------===//
+
def : Pat<(brcond (i16 (seteq R16C:$rA, 0)), bb:$dest),
(BRHZ R16C:$rA, bb:$dest)>;
-def : Pat<(brcond (i16 (setne R16C:$rA, 0)), bb:$dest),
- (BRHNZ R16C:$rA, bb:$dest)>;
-
def : Pat<(brcond (i32 (seteq R32C:$rA, 0)), bb:$dest),
(BRZ R32C:$rA, bb:$dest)>;
+
+def : Pat<(brcond (i16 (setne R16C:$rA, 0)), bb:$dest),
+ (BRHNZ R16C:$rA, bb:$dest)>;
def : Pat<(brcond (i32 (setne R32C:$rA, 0)), bb:$dest),
- (BRZ R32C:$rA, bb:$dest)>;
+ (BRNZ R32C:$rA, bb:$dest)>;
+
+def : Pat<(brcond (i16 (setne R16C:$rA, i16ImmSExt10:$val)), bb:$dest),
+ (BRHNZ (CEQHIr16 R16C:$rA, i16ImmSExt10:$val), bb:$dest)>;
+def : Pat<(brcond (i32 (setne R32C:$rA, i32ImmSExt10:$val)), bb:$dest),
+ (BRNZ (CEQIr32 R32C:$rA, i32ImmSExt10:$val), bb:$dest)>;
+
+def : Pat<(brcond (i16 (setne R16C:$rA, R16C:$rB)), bb:$dest),
+ (BRHNZ (CEQHr16 R16C:$rA, R16C:$rB), bb:$dest)>;
+def : Pat<(brcond (i32 (setne R32C:$rA, R32C:$rB)), bb:$dest),
+ (BRNZ (CEQr32 R32C:$rA, R32C:$rB), bb:$dest)>;
let isTerminator = 1, isBarrier = 1 in {
let isReturn = 1 in {
}
}
-//===----------------------------------------------------------------------===//
-// Various brcond predicates:
-//===----------------------------------------------------------------------===//
-/*
-def : Pat<(brcond (i32 (seteq R32C:$rA, 0)), bb:$dest),
- (BRZ R32C:$rA, bb:$dest)>;
-
-def : Pat<(brcond (i32 (seteq R32C:$rA, R32C:$rB)), bb:$dest),
- (BRNZ (CEQr32 R32C:$rA, R32C:$rB), bb:$dest)>;
-
-def : Pat<(brcond (i16 (seteq R16C:$rA, i16ImmSExt10:$val)), bb:$dest),
- (BRHNZ (CEQHIr16 R16C:$rA, i16ImmSExt10:$val), bb:$dest)>;
-
-def : Pat<(brcond (i16 (seteq R16C:$rA, R16C:$rB)), bb:$dest),
- (BRHNZ (CEQHr16 R16C:$rA, R16C:$rB), bb:$dest)>;
-*/
-
//===----------------------------------------------------------------------===//
// Single precision floating point instructions
//===----------------------------------------------------------------------===//
def : Pat<(v2f64 (bitconvert (v2f64 VECREG:$src))), (v2f64 VECREG:$src)>;
def : Pat<(f32 (bitconvert (i32 R32C:$src))), (f32 R32FP:$src)>;
+def : Pat<(f64 (bitconvert (i64 R64C:$src))), (f64 R64FP:$src)>;
//===----------------------------------------------------------------------===//
// Instruction patterns:
def : Pat<(v4i32 v4i32Imm:$imm),
(IOHLvec (v4i32 (ILHUv4i32 (HI16_vec v4i32Imm:$imm))),
(LO16_vec v4i32Imm:$imm))>;
+
+// 8-bit constants
+def : Pat<(i8 imm:$imm),
+ (ILHr8 imm:$imm)>;
//===----------------------------------------------------------------------===//
// Call instruction patterns:
def : Pat<(sext_inreg R32C:$rSrc, i8),
(XSHWr32 (XSBHr32 R32C:$rSrc))>;
+def : Pat<(i32 (sext R8C:$rSrc)),
+ (XSHWr16 (XSBHr8 R8C:$rSrc))>;
+
def : Pat<(SPUextract_i8_sext VECREG:$rSrc),
(XSHWr32 (XSBHr32 (ORi32_v4i32 (v4i32 VECREG:$rSrc),
(v4i32 VECREG:$rSrc))))>;
+// zext 8->16: Zero extend bytes to halfwords
+def : Pat<(i16 (zext R8C:$rSrc)),
+ (ANDHI1To2 R8C:$rSrc, 0xff)>;
+
+// zext 8->32 from preferred slot in load/store
def : Pat<(SPUextract_i8_zext VECREG:$rSrc),
(ANDIr32 (ORi32_v4i32 (v4i32 VECREG:$rSrc), (v4i32 VECREG:$rSrc)),
0xff)>;
+// zext 8->32: Zero extend bytes to words
+def : Pat<(i32 (zext R8C:$rSrc)),
+ (ANDI1To4 R8C:$rSrc, 0xff)>;
+
+// anyext 8->16: Extend 8->16 bits, irrespective of sign
+def : Pat<(i16 (anyext R8C:$rSrc)),
+ (ORHI1To2 R8C:$rSrc, 0)>;
+
+// anyext 8->32: Extend 8->32 bits, irrespective of sign
+def : Pat<(i32 (anyext R8C:$rSrc)),
+ (ORI1To4 R8C:$rSrc, 0)>;
+
// zext 16->32: Zero extend halfwords to words (note that we have to juggle the
// 0xffff constant since it will not fit into an immediate.)
def : Pat<(i32 (zext R16C:$rSrc)),
(ORI2To4 R16C:$rSrc, 0)>;
//===----------------------------------------------------------------------===//
-// Address translation: SPU, like PPC, has to split addresses into high and
+// Address generation: SPU, like PPC, has to split addresses into high and
// low parts in order to load them into a register.
//===----------------------------------------------------------------------===//
-def : Pat<(SPUhi tglobaladdr:$in, 0), (ILHUhi tglobaladdr:$in)>;
-def : Pat<(SPUlo tglobaladdr:$in, 0), (ILAlo tglobaladdr:$in)>;
-def : Pat<(SPUdform tglobaladdr:$in, imm:$imm), (ILAlsa tglobaladdr:$in)>;
-def : Pat<(SPUhi tconstpool:$in , 0), (ILHUhi tconstpool:$in)>;
-def : Pat<(SPUlo tconstpool:$in , 0), (ILAlo tconstpool:$in)>;
-def : Pat<(SPUdform tconstpool:$in, imm:$imm), (ILAlsa tconstpool:$in)>;
-def : Pat<(SPUhi tjumptable:$in, 0), (ILHUhi tjumptable:$in)>;
-def : Pat<(SPUlo tjumptable:$in, 0), (ILAlo tjumptable:$in)>;
-def : Pat<(SPUdform tjumptable:$in, imm:$imm), (ILAlsa tjumptable:$in)>;
-
-// Force load of global address to a register. These forms show up in
-// SPUISD::DFormAddr pseudo instructions:
-/*
-def : Pat<(add tglobaladdr:$in, 0), (ILAlsa tglobaladdr:$in)>;
-def : Pat<(add tconstpool:$in, 0), (ILAlsa tglobaladdr:$in)>;
-def : Pat<(add tjumptable:$in, 0), (ILAlsa tglobaladdr:$in)>;
- */
+def : Pat<(SPUaform tglobaladdr:$in, 0), (ILAlsa tglobaladdr:$in)>;
+def : Pat<(SPUaform texternalsym:$in, 0), (ILAlsa texternalsym:$in)>;
+def : Pat<(SPUaform tjumptable:$in, 0), (ILAlsa tjumptable:$in)>;
+def : Pat<(SPUaform tconstpool:$in, 0), (ILAlsa tconstpool:$in)>;
+
+def : Pat<(SPUindirect (SPUhi tglobaladdr:$in, 0),
+ (SPUlo tglobaladdr:$in, 0)),
+ (IOHLlo (ILHUhi tglobaladdr:$in), tglobaladdr:$in)>;
+
+def : Pat<(SPUindirect (SPUhi texternalsym:$in, 0),
+ (SPUlo texternalsym:$in, 0)),
+ (IOHLlo (ILHUhi texternalsym:$in), texternalsym:$in)>;
+
+def : Pat<(SPUindirect (SPUhi tjumptable:$in, 0),
+ (SPUlo tjumptable:$in, 0)),
+ (IOHLlo (ILHUhi tjumptable:$in), tjumptable:$in)>;
+
+def : Pat<(SPUindirect (SPUhi tconstpool:$in, 0),
+ (SPUlo tconstpool:$in, 0)),
+ (IOHLlo (ILHUhi tconstpool:$in), tconstpool:$in)>;
+
+def : Pat<(add (SPUhi tglobaladdr:$in, 0), (SPUlo tglobaladdr:$in, 0)),
+ (IOHLlo (ILHUhi tglobaladdr:$in), tglobaladdr:$in)>;
+
+def : Pat<(add (SPUhi texternalsym:$in, 0), (SPUlo texternalsym:$in, 0)),
+ (IOHLlo (ILHUhi texternalsym:$in), texternalsym:$in)>;
+
+def : Pat<(add (SPUhi tjumptable:$in, 0), (SPUlo tjumptable:$in, 0)),
+ (IOHLlo (ILHUhi tjumptable:$in), tjumptable:$in)>;
+
+def : Pat<(add (SPUhi tconstpool:$in, 0), (SPUlo tconstpool:$in, 0)),
+ (IOHLlo (ILHUhi tconstpool:$in), tconstpool:$in)>;
+
// Instrinsics:
include "CellSDKIntrinsics.td"