KVM: x86: avoid uninitialized variable warning
author Paolo Bonzini <pbonzini@redhat.com>
Sun, 6 Sep 2015 14:24:50 +0000 (16:24 +0200)
committer Paolo Bonzini <pbonzini@redhat.com>
Sun, 6 Sep 2015 14:26:21 +0000 (16:26 +0200)
The warning does not show up with all compiler versions, so it sneaked
into the first 4.3 pull request.  The fix is to mimic the logic of the
"print sptes" loop in the "fill array" loop.  Then both leaf and root
can be initialized unconditionally.

Note that "leaf" now points to the first unused element of the array,
not the last filled element.
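
A minimal standalone sketch (not the kernel code; LEVELS and the dummy
values are made up for illustration) of the resulting index convention:

  #include <stdio.h>
  #include <stdint.h>

  #define LEVELS 4        /* hypothetical stand-in for the root level */

  int main(void)
  {
          uint64_t sptes[LEVELS];
          int root, leaf, level;

          /* "fill array" loop: walk from the root level downwards,
           * storing one entry per level.  leaf is decremented after
           * each store, so when the walk stops it indexes the first
           * unused element, not the last filled one.
           */
          leaf = root = LEVELS;
          for (level = LEVELS; level >= 1; level--) {
                  sptes[leaf - 1] = 0x1000 + level;   /* dummy spte */
                  leaf--;
                  if (level == 2)     /* pretend the walk stops here */
                          break;
          }

          /* "print sptes" loop: because leaf already sits one below
           * the last filled slot, the condition is root > leaf rather
           * than root >= leaf.
           */
          while (root > leaf) {
                  printf("------ spte 0x%llx level %d\n",
                         (unsigned long long)sptes[root - 1], root);
                  root--;
          }
          return 0;
  }

The "root > leaf" condition in the second hunk below follows directly
from this convention.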

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu.c

index fb16a8ea3dee026d24a6e47332326226cf3a70e0..69088a1ba5090ffa763a56f5c105560f360de767 100644
@@ -3309,13 +3309,14 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 
        walk_shadow_page_lockless_begin(vcpu);
 
-       for (shadow_walk_init(&iterator, vcpu, addr), root = iterator.level;
+       for (shadow_walk_init(&iterator, vcpu, addr),
+                leaf = root = iterator.level;
             shadow_walk_okay(&iterator);
             __shadow_walk_next(&iterator, spte)) {
-               leaf = iterator.level;
                spte = mmu_spte_get_lockless(iterator.sptep);
 
                sptes[leaf - 1] = spte;
+               leaf--;
 
                if (!is_shadow_present_pte(spte))
                        break;
@@ -3329,7 +3330,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
        if (reserved) {
                pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
                       __func__, addr);
-               while (root >= leaf) {
+               while (root > leaf) {
                        pr_err("------ spte 0x%llx level %d.\n",
                               sptes[root - 1], root);
                        root--;