Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
Value *Op = LI.getOperand(0);
+ // Attempt to improve the alignment.
+ unsigned KnownAlign = GetKnownAlignment(Op, TD);
+ if (KnownAlign > LI.getAlignment())
+ LI.setAlignment(KnownAlign);
+
// load (cast X) --> cast (load X) iff safe
if (isa<CastInst>(Op))
if (Instruction *Res = InstCombineLoadCast(*this, LI))
}
}
+ // Attempt to improve the alignment.
+ unsigned KnownAlign = GetKnownAlignment(Ptr, TD);
+ if (KnownAlign > SI.getAlignment())
+ SI.setAlignment(KnownAlign);
+
// Do really simple DSE, to catch cases where there are several consequtive
// stores to the same location, separated by a few arithmetic operations. This
// situation often occurs with bitfield accesses.
--- /dev/null
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep {, align 16} | wc -l | grep 12
+
+@x = external global <2 x i64>, align 16
+@xx = external global [13 x <2 x i64>], align 16
+
+; Load via a constant-index GEP off the align-16 global @x. Since the
+; element type is 16 bytes wide, index 7 stays 16-byte aligned, so the
+; "align 1" here is expected to become "align 16" (counted by the RUN line).
+define <2 x i64> @static_hem() {
+  %t = getelementptr <2 x i64>* @x, i32 7
+  %tmp1 = load <2 x i64>* %t, align 1
+  ret <2 x i64> %tmp1
+}
+
+; Same as @static_hem but with a runtime index %i: every element of a
+; 16-byte-element array based at an align-16 global is still 16-byte
+; aligned, so the load's alignment should also be raised to 16.
+define <2 x i64> @hem(i32 %i) {
+  %t = getelementptr <2 x i64>* @x, i32 %i
+  %tmp1 = load <2 x i64>* %t, align 1
+  ret <2 x i64> %tmp1
+}
+
+; Two-dimensional variable GEP into the align-16 array global @xx; both
+; index strides are multiples of 16 bytes, so align 1 -> align 16 is
+; expected here as well (counted by the RUN line).
+define <2 x i64> @hem_2d(i32 %i, i32 %j) {
+  %t = getelementptr [13 x <2 x i64>]* @xx, i32 %i, i32 %j
+  %tmp1 = load <2 x i64>* %t, align 1
+  ret <2 x i64> %tmp1
+}
+
+; Simplest case: direct load of the global @x, which is declared align 16,
+; so the under-specified "align 1" should be raised to "align 16".
+define <2 x i64> @foo() {
+  %tmp1 = load <2 x i64>* @x, align 1
+  ret <2 x i64> %tmp1
+}
+
+; Load from a local alloca whose address escapes through @kip. The RUN-line
+; count of 12 implies this load, too, is expected to end up "align 16"
+; (presumably from the alloca's natural alignment — the escaping call must
+; not block the alignment improvement).
+define <2 x i64> @bar() {
+  %t = alloca <2 x i64>
+; NOTE: the trailing ';' below starts an empty IR comment — harmless.
+  call void @kip(<2 x i64>* %t);
+  %tmp1 = load <2 x i64>* %t, align 1
+  ret <2 x i64> %tmp1
+}
+
+; Store counterpart of @static_hem: constant-index GEP off align-16 @x,
+; so the store's "align 1" should be raised to "align 16".
+define void @static_hem_store(<2 x i64> %y) {
+  %t = getelementptr <2 x i64>* @x, i32 7
+  store <2 x i64> %y, <2 x i64>* %t, align 1
+  ret void
+}
+
+; Store counterpart of @hem: variable index over 16-byte elements from an
+; align-16 base keeps 16-byte alignment, so align 1 -> align 16 expected.
+define void @hem_store(i32 %i, <2 x i64> %y) {
+  %t = getelementptr <2 x i64>* @x, i32 %i
+  store <2 x i64> %y, <2 x i64>* %t, align 1
+  ret void
+}
+
+; Store counterpart of @hem_2d: two variable indices into align-16 @xx,
+; both strides multiples of 16 bytes; align 1 -> align 16 expected.
+define void @hem_2d_store(i32 %i, i32 %j, <2 x i64> %y) {
+  %t = getelementptr [13 x <2 x i64>]* @xx, i32 %i, i32 %j
+  store <2 x i64> %y, <2 x i64>* %t, align 1
+  ret void
+}
+
+; Store counterpart of @foo: direct store to the align-16 global @x;
+; the "align 1" should be raised to "align 16".
+define void @foo_store(<2 x i64> %y) {
+  store <2 x i64> %y, <2 x i64>* @x, align 1
+  ret void
+}
+
+; Store counterpart of @bar: store to an escaping alloca; per the RUN-line
+; count this store is also expected to end up "align 16".
+define void @bar_store(<2 x i64> %y) {
+  %t = alloca <2 x i64>
+; NOTE: the trailing ';' below starts an empty IR comment — harmless.
+  call void @kip(<2 x i64>* %t);
+  store <2 x i64> %y, <2 x i64>* %t, align 1
+  ret void
+}
+
+declare void @kip(<2 x i64>* %t)