[PATCH 1/1] [CVE-2012-1601] [HARDY] KVM: Ensure all vcpus are consistent with in-kernel irqchip settings

Brad Figg brad.figg at canonical.com
Wed Apr 25 19:11:51 UTC 2012


On 04/25/2012 12:05 PM, Tim Gardner wrote:
> On 04/25/2012 11:53 AM, Brad Figg wrote:
>> From: Avi Kivity <avi at redhat.com>
>>
>> CVE-2012-1601
>>
>> BugLink: http://bugs.launchpad.net/bugs/971685
>>
>> If some vcpus are created before KVM_CREATE_IRQCHIP, then
>> irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading
>> to potential NULL pointer dereferences.
>>
>> Fix by:
>> - ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called
>> - ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP
>>
>> This is somewhat long-winded because vcpu->arch.apic is created without
>> kvm->lock held.
>>
>> Based on earlier patch by Michael Ellerman.
>>
>> Signed-off-by: Michael Ellerman <michael at ellerman.id.au>
>> Signed-off-by: Avi Kivity <avi at redhat.com>
>> (backported from commit 3e515705a1f46beb1c942bb8043c16f8ac7b1e9e upstream)
>> Signed-off-by: Brad Figg <brad.figg at canonical.com>
>> ---
>>  arch/x86/kvm/x86.c       |    9 +++++++++
>>  include/linux/kvm_host.h |    2 ++
>>  virt/kvm/kvm_main.c      |    5 +++++
>>  3 files changed, 16 insertions(+)
>>
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> index 2085040..f036054 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -1582,6 +1582,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
>>  		break;
>>  	}
>>  	case KVM_CREATE_IRQCHIP:
>> +		r = -EINVAL;
>> +		if (atomic_read(&kvm->online_vcpus))

It is used here (the atomic_read() above),

>> +			goto out;
>>  		r = -ENOMEM;
>>  		kvm->arch.vpic = kvm_create_pic(kvm);
>>  		if (kvm->arch.vpic) {
>> @@ -3350,6 +3353,11 @@ void kvm_arch_check_processor_compat(void *rtn)
>>  	kvm_x86_ops->check_processor_compatibility(rtn);
>>  }
>>  
>> +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
>> +{
>> +	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
>> +}
>> +
>>  int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
>>  {
>>  	struct page *page;
>> @@ -3435,6 +3443,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
>>  		}
>>  	}
>>  
>> +	atomic_set(&kvm->online_vcpus, 0);

and it is also used here.

>>  }
>>  
>>  void kvm_arch_destroy_vm(struct kvm *kvm)
>> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
>> index 958e003..01055ae 100644
>> --- a/include/linux/kvm_host.h
>> +++ b/include/linux/kvm_host.h
>> @@ -121,6 +121,7 @@ struct kvm {
>>  	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
>>  					KVM_PRIVATE_MEM_SLOTS];
>>  	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
>> +	atomic_t online_vcpus;
> 
> This seems spurious. It's not part of the upstream patch, nor is it used
> anywhere.
> 
>>  	struct list_head vm_list;
>>  	struct file *filp;
>>  	struct kvm_io_bus mmio_bus;
>> @@ -306,5 +307,6 @@ struct kvm_stats_debugfs_item {
>>  	struct dentry *dentry;
>>  };
>>  extern struct kvm_stats_debugfs_item debugfs_entries[];
>> +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
>>  
>>  #endif
>> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
>> index 240156e..b128dcc 100644
>> --- a/virt/kvm/kvm_main.c
>> +++ b/virt/kvm/kvm_main.c
>> @@ -798,12 +798,17 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
>>  		goto vcpu_destroy;
>>  
>>  	mutex_lock(&kvm->lock);
>> +	if (!kvm_vcpu_compatible(vcpu)) {
>> +		r = -EINVAL;
>> +		goto vcpu_destroy;
>> +	}
>>  	if (kvm->vcpus[n]) {
>>  		r = -EEXIST;
>>  		mutex_unlock(&kvm->lock);
>>  		goto vcpu_destroy;
>>  	}
>>  	kvm->vcpus[n] = vcpu;
>> +	atomic_inc(&kvm->online_vcpus);
> 
> See my comment above.
> 
>>  	mutex_unlock(&kvm->lock);
>>  
>>  	/* Now it's all set up, let userspace reach it */
> 
> I also noticed that openvz and xen probably need the same patch set.
> 
> rtg


-- 
Brad Figg brad.figg at canonical.com http://www.canonical.com




More information about the kernel-team mailing list