Message-ID: <CAAd53p6bRnhGOfLHBz4WyTQVKocZPK0d5m4oRdeLhWxkx-5tLw@mail.gmail.com>
Date: Thu, 16 Nov 2023 13:22:09 +0800
From: Kai-Heng Feng <kai.heng.feng@...onical.com>
To: Ricky WU <ricky_wu@...ltek.com>
Cc: Lukas Wunner <lukas@...ner.de>,
"bhelgaas@...gle.com" <bhelgaas@...gle.com>,
"linux-pm@...r.kernel.org" <linux-pm@...r.kernel.org>,
"linux-mmc@...r.kernel.org" <linux-mmc@...r.kernel.org>,
Kees Cook <keescook@...omium.org>,
Tony Luck <tony.luck@...el.com>,
"Guilherme G. Piccoli" <gpiccoli@...lia.com>,
"linux-pci@...r.kernel.org" <linux-pci@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH] PCI: pciehp: Prevent child devices from doing RPM on PCIe Link Down
Hi Ricky,
On Fri, Nov 10, 2023 at 10:41 AM Ricky WU <ricky_wu@...ltek.com> wrote:
>
> > Hi Lukas and Ricky,
> >
> > I think the following approach should cover all the cases?
> > Ricky, can you please give it a try?
> >
>
> This patch does not fix the issue:
> the problem reappears after plugging and unplugging twice.
> Dmesg below:
My bad. The pm_runtime_barrier() should be called earlier in the removal
path, before device_release_driver().
Can you please give the following a try:
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index d749ea8250d6..c69b4ce5dbfd 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/pci.h>
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include "pci.h"
 
 static void pci_free_resources(struct pci_dev *dev)
@@ -18,6 +19,7 @@ static void pci_stop_dev(struct pci_dev *dev)
 	pci_pme_active(dev, false);
 
 	if (pci_dev_is_added(dev)) {
+		pm_runtime_barrier(&dev->dev);
 		device_release_driver(&dev->dev);
 		pci_proc_detach_device(dev);
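
For context, a rough sketch (abridged, not the literal drivers/pci/remove.c
code) of the ordering this hunk establishes: the barrier now runs before the
driver is unbound, so a runtime-PM work item that is still queued (the
rpm_idle() -> rtsx_pci_runtime_idle() chain in your trace) is cancelled or
completed while the rtsx MMIO mapping is still valid.

/* Abridged sketch of pci_stop_dev() with the hunk applied: */
static void pci_stop_dev(struct pci_dev *dev)
{
	pci_pme_active(dev, false);

	if (pci_dev_is_added(dev)) {
		/*
		 * Cancel any runtime-PM request still queued on the pm
		 * workqueue (e.g. rpm_idle()) and wait for a callback
		 * that is already running to finish.
		 */
		pm_runtime_barrier(&dev->dev);

		/*
		 * Unbind the driver; the rtsx MMIO mapping goes away
		 * here (hence the later ioread32() fault).
		 */
		device_release_driver(&dev->dev);
		pci_proc_detach_device(dev);
		/* ... */
	}
}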
> [...]
> [ 360.771161] pcieport 0000:00:1c.0: pciehp: Slot(8): Link Down
> [ 360.771165] pcieport 0000:00:1c.0: pciehp: Slot(8): Card not present
> [ 361.986932] pcieport 0000:00:1c.0: pciehp: Slot(8): Card present
> [ 361.986937] pcieport 0000:00:1c.0: pciehp: Slot(8): Link Up
> [ 362.120635] pci 0000:01:00.0: [15b7:5007] type 00 class 0x010802
> [ 362.120691] pci 0000:01:00.0: reg 0x10: [mem 0x00000000-0x00003fff 64bit]
> [ 362.120747] pci 0000:01:00.0: reg 0x20: [mem 0x00000000-0x000000ff 64bit]
> [ 362.121606] pci 0000:01:00.0: BAR 0: assigned [mem 0xa3b00000-0xa3b03fff 64bit]
> [ 362.121646] pci 0000:01:00.0: BAR 4: assigned [mem 0xa3b04000-0xa3b040ff 64bit]
> [ 362.121686] pcieport 0000:00:1c.0: PCI bridge to [bus 01]
> [ 362.121702] pcieport 0000:00:1c.0: bridge window [io 0x4000-0x4fff]
> [ 362.121743] pcieport 0000:00:1c.0: bridge window [mem 0xa3b00000-0xa44fffff]
> [ 362.121761] pcieport 0000:00:1c.0: bridge window [mem 0xa0a00000-0xa13fffff 64bit pref]
> [ 362.171709] nvme nvme0: pci function 0000:01:00.0
> [ 362.171726] nvme 0000:01:00.0: enabling device (0000 -> 0002)
> [ 362.256724] nvme nvme0: 4/0/0 default/read/poll queues
> [ 362.261656] nvme0n1: p1 p2
> [ 369.343246] pcieport 0000:00:1c.0: pciehp: Slot(8): Link Down
> [ 369.457450] pcieport 0000:00:1c.0: pciehp: Slot(8): Card present
> [ 369.457462] pcieport 0000:00:1c.0: pciehp: Slot(8): Link Up
> [ 369.592600] pci 0000:01:00.0: [10ec:5261] type 00 class 0xff0000
> [ 369.592654] pci 0000:01:00.0: reg 0x10: [mem 0xa3b00000-0xa3b00fff]
> [ 369.592776] pci 0000:01:00.0: Upstream bridge's Max Payload Size set to 128 (was 256, max 256)
> [ 369.592797] pci 0000:01:00.0: Max Payload Size set to 128 (was 128, max 128)
> [ 369.592964] pci 0000:01:00.0: supports D1 D2
> [ 369.592970] pci 0000:01:00.0: PME# supported from D1 D2 D3hot D3cold
> [ 369.593642] pci 0000:01:00.0: BAR 0: assigned [mem 0xa3b00000-0xa3b00fff]
> [ 369.593662] pcieport 0000:00:1c.0: PCI bridge to [bus 01]
> [ 369.593679] pcieport 0000:00:1c.0: bridge window [io 0x4000-0x4fff]
> [ 369.593692] pcieport 0000:00:1c.0: bridge window [mem 0xa3b00000-0xa44fffff]
> [ 369.593703] pcieport 0000:00:1c.0: bridge window [mem 0xa0a00000-0xa13fffff 64bit pref]
> [ 372.573757] pcieport 0000:00:1c.0: pciehp: Slot(8): Link Down
> [ 372.586620] BUG: unable to handle page fault for address: ffffc90000089010
> [ 372.586624] #PF: supervisor read access in kernel mode
> [ 372.586626] #PF: error_code(0x0000) - not-present page
> [ 372.586627] PGD 100000067 P4D 100000067 PUD 10020a067 PMD 10020b067 PTE 0
> [ 372.586632] Oops: 0000 [#1] PREEMPT SMP PTI
> [ 372.586634] CPU: 2 PID: 157 Comm: kworker/2:2 Not tainted 6.6.0-rc4 #12
> [ 372.586637] Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./H370M Pro4, BIOS P3.40 10/25/2018
> [ 372.586639] Workqueue: pm pm_runtime_work
> [ 372.586644] RIP: 0010:ioread32+0x2e/0x70
> [ 372.586647] Code: ff 03 00 77 25 48 81 ff 00 00 01 00 77 14 8b 15 98 ac 52 01 b8 ff ff ff ff 85 d2 75 14 c3 cc cc cc cc 89 fa ed c3 cc cc cc cc <8b> 07 c3 cc cc cc cc 55 83 ea 01 48 89 fe 48 c7 c7 f8 97 57 86 48
> [ 372.586649] RSP: 0018:ffffc90000543d50 EFLAGS: 00010296
> [ 372.586652] RAX: ffffc90000089000 RBX: 000000000000032f RCX: 000000000000007f
> [ 372.586653] RDX: 000000000000ff00 RSI: ffffc90000089010 RDI: ffffc90000089010
> [ 372.586655] RBP: ffffc90000543d70 R08: ffffc90000089010 R09: ffff88816a132368
> [ 372.586656] R10: 0000000000000000 R11: 0000000000000003 R12: 000000000000007f
> [ 372.586657] R13: ffff8881060ed000 R14: ffff8881060ed100 R15: 0000000000009003
> [ 372.586659] FS: 0000000000000000(0000) GS:ffff88816a100000(0000) knlGS:0000000000000000
> [ 372.586661] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [ 372.586662] CR2: ffffc90000089010 CR3: 0000000043032006 CR4: 00000000003706e0
> [ 372.586664] Call Trace:
> [ 372.586665] <TASK>
> [ 372.586667] ? show_regs+0x68/0x70
> [ 372.586671] ? __die_body+0x20/0x70
> [ 372.586674] ? __die+0x2b/0x40
> [ 372.586677] ? page_fault_oops+0x160/0x480
> [ 372.586680] ? search_bpf_extables+0x63/0x90
> [ 372.586684] ? ioread32+0x2e/0x70
> [ 372.586686] ? search_exception_tables+0x5f/0x70
> [ 372.586689] ? kernelmode_fixup_or_oops+0xa2/0x120
> [ 372.586692] ? __bad_area_nosemaphore+0x179/0x230
> [ 372.586696] ? bad_area_nosemaphore+0x16/0x20
> [ 372.586698] ? do_kern_addr_fault+0x8b/0xa0
> [ 372.586701] ? exc_page_fault+0xe5/0x180
> [ 372.586705] ? asm_exc_page_fault+0x27/0x30
> [ 372.586710] ? ioread32+0x2e/0x70
> [ 372.586714] ? rtsx_pci_write_register+0x5b/0x90 [rtsx_pci]
> [ 372.586723] rtsx_set_l1off_sub+0x1c/0x30 [rtsx_pci]
> [ 372.586731] rts5261_set_l1off_cfg_sub_d0+0x36/0x40 [rtsx_pci]
> [ 372.586740] rtsx_pci_runtime_idle+0xc7/0x160 [rtsx_pci]
> [ 372.586748] ? __pfx_pci_pm_runtime_idle+0x10/0x10
> [ 372.586751] pci_pm_runtime_idle+0x34/0x70
> [ 372.586753] rpm_idle+0xc4/0x2b0
> [ 372.586756] pm_runtime_work+0x93/0xc0
> [ 372.586759] process_scheduled_works+0x9a/0x390
> [ 372.586762] ? __pfx_worker_thread+0x10/0x10
> [ 372.586764] worker_thread+0x15b/0x2d0
> [ 372.586767] ? __pfx_worker_thread+0x10/0x10
> [ 372.586769] kthread+0x106/0x140
> [ 372.586771] ? __pfx_kthread+0x10/0x10
> [ 372.586774] ret_from_fork+0x39/0x60
> [ 372.586776] ? __pfx_kthread+0x10/0x10
> [ 372.586778] ret_from_fork_asm+0x1b/0x30
> [ 372.586783] </TASK>
> [ 372.586784] Modules linked in: nvme nvme_core snd_hda_codec_hdmi snd_hda_codec_realtek snd_hda_codec_generic ledtrig_audio nls_iso8859_1 snd_sof_pci_intel_cnl snd_sof_intel_hda_common snd_soc_hdac_hda soundwire_intel soundwire_generic_allocation snd_sof_intel_hda_mlink soundwire_cadence snd_sof_intel_hda snd_sof_pci snd_sof_xtensa_dsp snd_sof snd_sof_utils snd_hda_ext_core snd_soc_acpi_intel_match snd_soc_acpi soundwire_bus intel_rapl_msr intel_rapl_common snd_soc_core x86_pkg_temp_thermal intel_powerclamp coretemp snd_compress ac97_bus kvm_intel snd_pcm_dmaengine i915 snd_hda_intel snd_intel_dspcfg snd_intel_sdw_acpi kvm snd_hda_codec snd_hda_core mei_hdcp crct10dif_pclmul ghash_clmulni_intel snd_hwdep snd_pcm sha512_ssse3 aesni_intel snd_seq_midi crypto_simd snd_seq_midi_event cryptd rapl snd_rawmidi intel_cstate drm_buddy binfmt_misc ttm snd_seq drm_display_helper joydev input_leds cec snd_seq_device wmi_bmof snd_timer ee1004 mei_me rc_core snd drm_kms_helper mei intel_pch_thermal i2c_algo_bit soundcore mac_hid
> [ 372.586834] acpi_tad acpi_pad sch_fq_codel msr parport_pc ppdev lp parport drm ramoops reed_solomon efi_pstore ip_tables x_tables autofs4 hid_generic usbhid hid rtsx_pci_sdmmc ahci i2c_i801 e1000e crc32_pclmul rtsx_pci i2c_smbus xhci_pci libahci xhci_pci_renesas video wmi
> [ 372.586856] CR2: ffffc90000089010
> [ 372.586858] ---[ end trace 0000000000000000 ]---
> [ 372.746808] RIP: 0010:ioread32+0x2e/0x70
> [ 372.746816] Code: ff 03 00 77 25 48 81 ff 00 00 01 00 77 14 8b 15 98 ac 52 01 b8 ff ff ff ff 85 d2 75 14 c3 cc cc cc cc 89 fa ed c3 cc cc cc cc <8b> 07 c3 cc cc cc cc 55 83 ea 01 48 89 fe 48 c7 c7 f8 97 57 86 48
> [ 372.746818] RSP: 0018:ffffc90000543d50 EFLAGS: 00010296
> [ 372.746821] RAX: ffffc90000089000 RBX: 000000000000032f RCX: 000000000000007f
> [ 372.746823] RDX: 000000000000ff00 RSI: ffffc90000089010 RDI: ffffc90000089010
> [ 372.746825] RBP: ffffc90000543d70 R08: ffffc90000089010 R09: ffff88816a132368
> [ 372.746826] R10: 0000000000000000 R11: 0000000000000003 R12: 000000000000007f
> [ 372.746828] R13: ffff8881060ed000 R14: ffff8881060ed100 R15: 0000000000009003
> [ 372.746830] FS: 0000000000000000(0000) GS:ffff88816a100000(0000) knlGS:0000000000000000
> [ 372.746832] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [ 372.746833] CR2: ffffc90000089010 CR3: 000000010a564004 CR4: 00000000003706e0
> [ 372.746835] note: kworker/2:2[157] exited with irqs disabled
>
> > diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
> > index d749ea8250d6..907d60587227 100644
> > --- a/drivers/pci/remove.c
> > +++ b/drivers/pci/remove.c
> > @@ -1,6 +1,7 @@
> >  // SPDX-License-Identifier: GPL-2.0
> >  #include <linux/pci.h>
> >  #include <linux/module.h>
> > +#include <linux/pm_runtime.h>
> >  #include "pci.h"
> >
> >  static void pci_free_resources(struct pci_dev *dev)
> > @@ -89,6 +90,8 @@ static void pci_remove_bus_device(struct pci_dev *dev)
> >  	struct pci_bus *bus = dev->subordinate;
> >  	struct pci_dev *child, *tmp;
> >
> > +	pm_runtime_barrier(&dev->dev);
> > +
> >  	if (bus) {
> >  		list_for_each_entry_safe(child, tmp,
> >  					 &bus->devices, bus_list)
> >
> >
>
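
As for why the placement quoted above was not enough: assuming the usual
removal flow in drivers/pci/remove.c (abridged sketch below, not the literal
code), pci_remove_bus_device() only runs after pci_stop_bus_device() has
already unbound the driver via pci_stop_dev(), so a barrier there comes too
late to stop an already-queued idle callback.

/* Abridged sketch of the removal flow: */
void pci_stop_and_remove_bus_device(struct pci_dev *dev)
{
	pci_stop_bus_device(dev);	/* recurses into pci_stop_dev(),
					 * which unbinds the driver */
	pci_remove_bus_device(dev);	/* a barrier placed here runs
					 * only after the unbind */
}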