KVM: MMU: avoid double write protected in sync page path
author Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Fri, 11 Jun 2010 13:30:36 +0000 (21:30 +0800)
committer Avi Kivity <avi@redhat.com>
Sun, 1 Aug 2010 07:46:41 +0000 (10:46 +0300)
The sync page is already write-protected in mmu_sync_children(); don't
write-protect it again.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
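
For context, the caller already takes care of write protection before the
sync: mmu_sync_children() write-protects every page in the batch, flushes
remote TLBs once if anything changed, and only then calls kvm_sync_page() on
each page. The excerpt below is a rough paraphrase of that caller from the
same era of the code (not part of this patch), shown only to illustrate why
the second write protect inside __kvm_sync_page() is redundant:

	/* paraphrased excerpt from mmu_sync_children() */
	int protected = 0;

	/* write-protect every unsync page collected in this batch */
	for_each_sp(pages, sp, parents, i)
		protected |= rmap_write_protect(vcpu->kvm, sp->gfn);

	/* one remote TLB flush for the whole batch */
	if (protected)
		kvm_flush_remote_tlbs(vcpu->kvm);

	/* sync each page; its gfn is already write-protected here */
	for_each_sp(pages, sp, parents, i) {
		kvm_sync_page(vcpu, sp, &invalid_list);
		mmu_pages_clear_parents(&parents);
	}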
arch/x86/kvm/mmu.c

index aa98fca03ed7a6d104e91b0ae783a84397ffe498..ff333572be75e920f54b57d6694b5db47ac36b04 100644 (file)
@@ -1216,6 +1216,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                if ((sp)->gfn != (gfn) || (sp)->role.direct ||          \
                        (sp)->role.invalid) {} else
 
+/* @sp->gfn should be write-protected at the call site */
 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                           struct list_head *invalid_list, bool clear_unsync)
 {
@@ -1224,11 +1225,8 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                return 1;
        }
 
-       if (clear_unsync) {
-               if (rmap_write_protect(vcpu->kvm, sp->gfn))
-                       kvm_flush_remote_tlbs(vcpu->kvm);
+       if (clear_unsync)
                kvm_unlink_unsync_page(vcpu->kvm, sp);
-       }
 
        if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
                kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);