Message-Id: <20210604023848.10549-1-13145886936@163.com>
Date: Thu, 3 Jun 2021 19:38:48 -0700
From: 13145886936@....com
To: pbonzini@...hat.com
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
gushengxian <gushengxian@...ong.com>
Subject: [PATCH] KVM: Revise the use of spaces and tabs
From: gushengxian <gushengxian@...ong.com>

Revise the use of spaces and tabs.

Signed-off-by: gushengxian <gushengxian@...ong.com>
---
 virt/kvm/kvm_main.c | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index eb440eb1225a..4cec505af62b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -132,7 +132,9 @@ static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
* passed to a compat task, let the ioctls fail.
*/
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
- unsigned long arg) { return -EINVAL; }
+ unsigned long arg) {
+ return -EINVAL;
+ }
static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
@@ -2104,7 +2106,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
* Whoever called remap_pfn_range is also going to call e.g.
* unmap_mapping_range before the underlying pages are freed,
* causing a call to our MMU notifier.
- */
+ */
kvm_get_pfn(pfn);
out:
@@ -2417,7 +2419,7 @@ static void __kvm_unmap_gfn(struct kvm *kvm,
map->page = NULL;
}
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
{
__kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map,
@@ -2576,7 +2578,7 @@ int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned l
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
- void *data, int offset, unsigned long len)
+ void *data, int offset, unsigned long len)
{
int r;
unsigned long addr;
@@ -2604,8 +2606,8 @@ int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
static int __kvm_write_guest_page(struct kvm *kvm,
- struct kvm_memory_slot *memslot, gfn_t gfn,
- const void *data, int offset, int len)
+ struct kvm_memory_slot *memslot, gfn_t gfn,
+ const void *data, int offset, int len)
{
int r;
unsigned long addr;
@@ -2660,7 +2662,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
EXPORT_SYMBOL_GPL(kvm_write_guest);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
- unsigned long len)
+ unsigned long len)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
int seg;
@@ -2823,8 +2825,8 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
EXPORT_SYMBOL_GPL(kvm_clear_guest);
void mark_page_dirty_in_slot(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- gfn_t gfn)
+ struct kvm_memory_slot *memslot,
+ gfn_t gfn)
{
if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
unsigned long rel_gfn = gfn - memslot->base_gfn;
--
2.25.1