Message-ID: <YW9XCp3B+ogPIl7i@google.com>
Date: Tue, 19 Oct 2021 23:38:50 +0000
From: Sean Christopherson <seanjc@...gle.com>
To: "Maciej S. Szmigiero" <mail@...iej.szmigiero.name>
Cc: Paolo Bonzini <pbonzini@...hat.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Igor Mammedov <imammedo@...hat.com>,
Marc Zyngier <maz@...nel.org>,
James Morse <james.morse@....com>,
Julien Thierry <julien.thierry.kdev@...il.com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Huacai Chen <chenhuacai@...nel.org>,
Aleksandar Markovic <aleksandar.qemu.devel@...il.com>,
Paul Mackerras <paulus@...abs.org>,
Christian Borntraeger <borntraeger@...ibm.com>,
Janosch Frank <frankja@...ux.ibm.com>,
David Hildenbrand <david@...hat.com>,
Cornelia Huck <cohuck@...hat.com>,
Claudio Imbrenda <imbrenda@...ux.ibm.com>,
Joerg Roedel <joro@...tes.org>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v5 05/13] KVM: Integrate gfn_to_memslot_approx() into
search_memslots()

On Mon, Sep 20, 2021, Maciej S. Szmigiero wrote:
> @@ -1267,7 +1280,7 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index)
>   * itself isn't here as an inline because that would bloat other code too much.
>   */
>  static inline struct kvm_memory_slot *
> -__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
> +__gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn, bool approx)

This function name is a misnomer. The helper is not an "approx" version; it's an
inner helper that takes an @approx param. Unless someone has a more clever name,
the dreaded four underscores seems like the way to go. Warning away users is a
good thing in this case...
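
I.e. something along these lines (naming sketch only, all names taken from the
full fixup below; the extra underscore is the whole point):

  /* inner helper, not meant to be called outside the wrappers below */
  static inline struct kvm_memory_slot *
  ____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx);

  static inline struct kvm_memory_slot *
  __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
  {
  	return ____gfn_to_memslot(slots, gfn, false);
  }
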
>  {
>  	struct kvm_memory_slot *slot;
>  	int slot_index = atomic_read(&slots->last_used_slot);
> @@ -1276,7 +1289,7 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
>  	if (slot)
>  		return slot;
>
> -	slot = search_memslots(slots, gfn, &slot_index);
> +	slot = search_memslots(slots, gfn, &slot_index, approx);
>  	if (slot) {
>  		atomic_set(&slots->last_used_slot, slot_index);
>  		return slot;
> @@ -1285,6 +1298,12 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
>  	return NULL;
>  }
>

There's a comment above the helper that doesn't show up in this diff; it should
also be moved (down to the __gfn_to_memslot() wrapper), and opportunistically
updated.
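
FWIW, the comment in question is the blurb that currently sits above the inner
helper:

  /*
   * __gfn_to_memslot() and its descendants are here because it is called from
   * non-modular code in arch/powerpc/kvm/book3s_64_vio{,_hv}.c. gfn_to_memslot()
   * itself isn't here as an inline because that would bloat other code too much.
   */
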
> +static inline struct kvm_memory_slot *
> +__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
> +{
> +	return __gfn_to_memslot_approx(slots, gfn, false);
> +}
> +
>  static inline unsigned long
>  __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
>  {

E.g. this as fixup?

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 540fa948baa5..2964c773b36c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1964,10 +1964,15 @@ static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
 	return 0;
 }
 
+static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
+						      gfn_t gfn)
+{
+	return ____gfn_to_memslot(slots, gfn, true);
+}
+
 static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
 					      unsigned long cur_gfn)
 {
-	struct kvm_memory_slot *ms = __gfn_to_memslot_approx(slots, cur_gfn, true);
+	struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
 	int slotidx = ms - slots->memslots;
 	unsigned long ofs = cur_gfn - ms->base_gfn;
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 8fd9644f40b2..ec1a074c2f6e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1274,13 +1274,8 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index, bool approx)
 	return NULL;
 }
 
-/*
- * __gfn_to_memslot() and its descendants are here because it is called from
- * non-modular code in arch/powerpc/kvm/book3s_64_vio{,_hv}.c. gfn_to_memslot()
- * itself isn't here as an inline because that would bloat other code too much.
- */
 static inline struct kvm_memory_slot *
-__gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn, bool approx)
+____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
 {
 	struct kvm_memory_slot *slot;
 	int slot_index = atomic_read(&slots->last_used_slot);
@@ -1298,10 +1293,15 @@ __gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn, bool approx)
 	return NULL;
 }
 
+/*
+ * __gfn_to_memslot() and its descendants are here to allow arch code to inline
+ * the lookups in hot paths. gfn_to_memslot() itself isn't here as an inline
+ * because that would bloat other code too much.
+ */
 static inline struct kvm_memory_slot *
 __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
 {
-	return __gfn_to_memslot_approx(slots, gfn, false);
+	return ____gfn_to_memslot(slots, gfn, false);
 }
 
 static inline unsigned long