Message-ID: <20251216103238.625468-1-abhiraj21put@gmail.com>
Date: Tue, 16 Dec 2025 16:02:38 +0530
From: Abhishek Rajput <abhiraj21put@...il.com>
To: Alex Deucher <alexander.deucher@....com>,
	Christian König <christian.koenig@....com>,
	David Airlie <airlied@...il.com>,
	Simona Vetter <simona@...ll.ch>
Cc: amd-gfx@...ts.freedesktop.org,
	dri-devel@...ts.freedesktop.org,
	linux-kernel@...r.kernel.org,
	abhiraj21put@...il.com
Subject: [PATCH] drm/radeon: Convert legacy DRM logging in evergreen.c to drm_* helpers

Replace DRM_DEBUG(), DRM_DEBUG_KMS(), DRM_ERROR(), and DRM_INFO() calls
with the corresponding drm_dbg(), drm_dbg_kms(), drm_err(), and
drm_info() helpers in the radeon evergreen code.

The drm_*() logging helpers take a struct drm_device * argument,
allowing the DRM core to prefix log messages with the correct device
name and instance. This is required to correctly distinguish log
messages on systems with multiple GPUs.
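
For illustration, the conversion pattern (taken here from the
evergreen_pm_misc() hunk below) is to look up the drm_device once per
function via rdev_to_drm() and pass it to the helper:

    struct drm_device *ddev = rdev_to_drm(rdev);

    /* before */
    DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
    /* after */
    drm_dbg(ddev, "Setting: vddc: %d\n", voltage->voltage);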

This change aligns radeon with the DRM TODO item:
"Convert logging to drm_* functions with drm_device parameter".

Signed-off-by: Abhishek Rajput <abhiraj21put@...il.com>

diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index bc4ab71613a5..3cbc6eedbf66 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1630,6 +1630,7 @@ void evergreen_pm_misc(struct radeon_device *rdev)
 	int req_cm_idx = rdev->pm.requested_clock_mode_index;
 	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
 	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+	struct drm_device *ddev = rdev_to_drm(rdev);
 
 	if (voltage->type == VOLTAGE_SW) {
 		/* 0xff0x are flags rather then an actual voltage */
@@ -1638,7 +1639,7 @@ void evergreen_pm_misc(struct radeon_device *rdev)
 		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
 			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
 			rdev->pm.current_vddc = voltage->voltage;
-			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
+			drm_dbg(ddev, "Setting: vddc: %d\n", voltage->voltage);
 		}
 
 		/* starting with BTC, there is one state that is used for both
@@ -1659,7 +1660,7 @@ void evergreen_pm_misc(struct radeon_device *rdev)
 		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
 			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
 			rdev->pm.current_vddci = voltage->vddci;
-			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
+			drm_dbg(ddev, "Setting: vddci: %d\n", voltage->vddci);
 		}
 	}
 }
@@ -2168,6 +2169,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
 	u32 pipe_offset = radeon_crtc->crtc_id * 16;
 	u32 tmp, arb_control3;
 	fixed20_12 a, b, c;
+	struct drm_device *ddev = rdev_to_drm(rdev);
 
 	if (radeon_crtc->base.enabled && num_heads && mode) {
 		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
@@ -2244,14 +2246,14 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
 		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm_high) ||
 		    !evergreen_check_latency_hiding(&wm_high) ||
 		    (rdev->disp_priority == 2)) {
-			DRM_DEBUG_KMS("force priority a to high\n");
+			drm_dbg_kms(ddev, "force priority a to high\n");
 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
 		}
 		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
 		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm_low) ||
 		    !evergreen_check_latency_hiding(&wm_low) ||
 		    (rdev->disp_priority == 2)) {
-			DRM_DEBUG_KMS("force priority b to high\n");
+			drm_dbg_kms(ddev, "force priority b to high\n");
 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
 		}
 
@@ -2401,6 +2403,7 @@ static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
 	int r;
+	struct drm_device *ddev = rdev_to_drm(rdev);
 
 	if (rdev->gart.robj == NULL) {
 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
@@ -2448,7 +2451,7 @@ static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 	WREG32(VM_CONTEXT1_CNTL, 0);
 
 	evergreen_pcie_gart_tlb_flush(rdev);
-	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+	drm_info(ddev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
 		 (unsigned)(rdev->mc.gtt_size >> 20),
 		 (unsigned long long)rdev->gart.table_addr);
 	rdev->gart.ready = true;
@@ -2626,16 +2629,17 @@ static void evergreen_blank_dp_output(struct radeon_device *rdev,
 	unsigned stream_ctrl;
 	unsigned fifo_ctrl;
 	unsigned counter = 0;
+	struct drm_device *ddev = rdev_to_drm(rdev);
 
 	if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
-		DRM_ERROR("invalid dig_fe %d\n", dig_fe);
+		drm_err(ddev, "invalid dig_fe %d\n", dig_fe);
 		return;
 	}
 
 	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
 			     evergreen_dp_offsets[dig_fe]);
 	if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
-		DRM_ERROR("dig %d , should be enable\n", dig_fe);
+		drm_err(ddev, "dig %d , should be enable\n", dig_fe);
 		return;
 	}
 
@@ -2652,7 +2656,7 @@ static void evergreen_blank_dp_output(struct radeon_device *rdev,
 				     evergreen_dp_offsets[dig_fe]);
 	}
 	if (counter >= 32)
-		DRM_ERROR("counter exceeds %d\n", counter);
+		drm_err(ddev, "counter exceeds %d\n", counter);
 
 	fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
 	fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
@@ -2998,10 +3002,11 @@ static int evergreen_cp_start(struct radeon_device *rdev)
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r, i;
 	uint32_t cp_me;
+	struct drm_device *ddev = rdev_to_drm(rdev);
 
 	r = radeon_ring_lock(rdev, ring, 7);
 	if (r) {
-		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		drm_err(ddev, "radeon: cp failed to lock ring (%d).\n", r);
 		return r;
 	}
 	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
@@ -3018,7 +3023,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
 
 	r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
 	if (r) {
-		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		drm_err(ddev, "radeon: cp failed to lock ring (%d).\n", r);
 		return r;
 	}
 
@@ -3826,6 +3831,7 @@ u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
 {
 	u32 reset_mask = 0;
 	u32 tmp;
+	struct drm_device *ddev = rdev_to_drm(rdev);
 
 	/* GRBM_STATUS */
 	tmp = RREG32(GRBM_STATUS);
@@ -3884,7 +3890,7 @@ u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
 
 	/* Skip MC reset as it's mostly likely not hung, just busy */
 	if (reset_mask & RADEON_RESET_MC) {
-		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+		drm_dbg(ddev, "MC busy: 0x%08X, clearing.\n", reset_mask);
 		reset_mask &= ~RADEON_RESET_MC;
 	}
 
@@ -4495,6 +4501,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	u32 grbm_int_cntl = 0;
 	u32 dma_cntl, dma_cntl1 = 0;
 	u32 thermal_int = 0;
+	struct drm_device *ddev = rdev_to_drm(rdev);
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -4520,40 +4527,40 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	if (rdev->family >= CHIP_CAYMAN) {
 		/* enable CP interrupts on all rings */
 		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
-			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+			drm_dbg(ddev, "%s : sw int gfx\n", __func__);
 			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
 		}
 		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
-			DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
+			drm_dbg(ddev, "%s : sw int cp1\n", __func__);
 			cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
 		}
 		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
-			DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
+			drm_dbg(ddev, "%s : sw int cp2\n", __func__);
 			cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
 		}
 	} else {
 		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
-			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+			drm_dbg(ddev, "%s : sw int gfx\n", __func__);
 			cp_int_cntl |= RB_INT_ENABLE;
 			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
 		}
 	}
 
 	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
-		DRM_DEBUG("r600_irq_set: sw int dma\n");
+		drm_dbg(ddev, "r600_irq_set: sw int dma\n");
 		dma_cntl |= TRAP_ENABLE;
 	}
 
 	if (rdev->family >= CHIP_CAYMAN) {
 		dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
 		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
-			DRM_DEBUG("r600_irq_set: sw int dma1\n");
+			drm_dbg(ddev, "r600_irq_set: sw int dma1\n");
 			dma_cntl1 |= TRAP_ENABLE;
 		}
 	}
 
 	if (rdev->irq.dpm_thermal) {
-		DRM_DEBUG("dpm thermal\n");
+		drm_dbg(ddev, "dpm thermal\n");
 		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
 	}
 
@@ -4713,6 +4720,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
 	bool queue_thermal = false;
 	u32 status, addr;
 	const char *event_name;
+	struct drm_device *ddev = rdev_to_drm(rdev);
 
 	if (!rdev->ih.enabled || rdev->shutdown)
 		return IRQ_NONE;
@@ -4725,7 +4733,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
 		return IRQ_NONE;
 
 	rptr = rdev->ih.rptr;
-	DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+	drm_dbg(ddev, "%s start: rptr %d, wptr %d\n", __func__, rptr, wptr);
 
 	/* Order reading of wptr vs. reading of IH ring data */
 	rmb();
@@ -4766,18 +4774,18 @@ int evergreen_irq_process(struct radeon_device *rdev)
 				mask = LB_D1_VLINE_INTERRUPT;
 				event_name = "vline";
 			} else {
-				DRM_DEBUG("Unhandled interrupt: %d %d\n",
+				drm_dbg(ddev, "Unhandled interrupt: %d %d\n",
 					  src_id, src_data);
 				break;
 			}
 
 			if (!(disp_int[crtc_idx] & mask)) {
-				DRM_DEBUG("IH: D%d %s - IH event w/o asserted irq bit?\n",
+				drm_dbg(ddev, "IH: D%d %s - IH event w/o asserted irq bit?\n",
 					  crtc_idx + 1, event_name);
 			}
 
 			disp_int[crtc_idx] &= ~mask;
-			DRM_DEBUG("IH: D%d %s\n", crtc_idx + 1, event_name);
+			drm_dbg(ddev, "IH: D%d %s\n", crtc_idx + 1, event_name);
 
 			break;
 		case 8: /* D1 page flip */
@@ -4786,7 +4794,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
 		case 14: /* D4 page flip */
 		case 16: /* D5 page flip */
 		case 18: /* D6 page flip */
-			DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+			drm_dbg(ddev, "IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
 			if (radeon_use_pflipirq > 0)
 				radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
 			break;
@@ -4804,39 +4812,39 @@ int evergreen_irq_process(struct radeon_device *rdev)
 				event_name = "HPD_RX";
 
 			} else {
-				DRM_DEBUG("Unhandled interrupt: %d %d\n",
+				drm_dbg(ddev, "Unhandled interrupt: %d %d\n",
 					  src_id, src_data);
 				break;
 			}
 
 			if (!(disp_int[hpd_idx] & mask))
-				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+				drm_dbg(ddev, "IH: IH event w/o asserted irq bit?\n");
 
 			disp_int[hpd_idx] &= ~mask;
-			DRM_DEBUG("IH: %s%d\n", event_name, hpd_idx + 1);
+			drm_dbg(ddev, "IH: %s%d\n", event_name, hpd_idx + 1);
 
 			break;
 		case 44: /* hdmi */
 			afmt_idx = src_data;
 			if (afmt_idx > 5) {
-				DRM_ERROR("Unhandled interrupt: %d %d\n",
+				drm_err(ddev, "Unhandled interrupt: %d %d\n",
 					  src_id, src_data);
 				break;
 			}
 
 			if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
-				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+				drm_dbg(ddev, "IH: IH event w/o asserted irq bit?\n");
 
 			afmt_status[afmt_idx] &= ~AFMT_AZ_FORMAT_WTRIG;
 			queue_hdmi = true;
-			DRM_DEBUG("IH: HDMI%d\n", afmt_idx + 1);
+			drm_dbg(ddev, "IH: HDMI%d\n", afmt_idx + 1);
 			break;
 		case 96:
-			DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+			drm_err(ddev, "SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
 			WREG32(SRBM_INT_ACK, 0x1);
 			break;
 		case 124: /* UVD */
-			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+			drm_dbg(ddev, "IH: UVD int: 0x%08x\n", src_data);
 			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
 			break;
 		case 146:
@@ -4857,11 +4865,11 @@ int evergreen_irq_process(struct radeon_device *rdev)
 		case 176: /* CP_INT in ring buffer */
 		case 177: /* CP_INT in IB1 */
 		case 178: /* CP_INT in IB2 */
-			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+			drm_dbg(ddev, "IH: CP int: 0x%08x\n", src_data);
 			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 			break;
 		case 181: /* CP EOP event */
-			DRM_DEBUG("IH: CP EOP\n");
+			drm_dbg(ddev, "IH: CP EOP\n");
 			if (rdev->family >= CHIP_CAYMAN) {
 				switch (src_data) {
 				case 0:
@@ -4878,30 +4886,30 @@ int evergreen_irq_process(struct radeon_device *rdev)
 				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 			break;
 		case 224: /* DMA trap event */
-			DRM_DEBUG("IH: DMA trap\n");
+			drm_dbg(ddev, "IH: DMA trap\n");
 			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
 			break;
 		case 230: /* thermal low to high */
-			DRM_DEBUG("IH: thermal low to high\n");
+			drm_dbg(ddev, "IH: thermal low to high\n");
 			rdev->pm.dpm.thermal.high_to_low = false;
 			queue_thermal = true;
 			break;
 		case 231: /* thermal high to low */
-			DRM_DEBUG("IH: thermal high to low\n");
+			drm_dbg(ddev, "IH: thermal high to low\n");
 			rdev->pm.dpm.thermal.high_to_low = true;
 			queue_thermal = true;
 			break;
 		case 233: /* GUI IDLE */
-			DRM_DEBUG("IH: GUI idle\n");
+			drm_dbg(ddev, "IH: GUI idle\n");
 			break;
 		case 244: /* DMA trap event */
 			if (rdev->family >= CHIP_CAYMAN) {
-				DRM_DEBUG("IH: DMA1 trap\n");
+				drm_dbg(ddev, "IH: DMA1 trap\n");
 				radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
 			}
 			break;
 		default:
-			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+			drm_dbg(ddev, "Unhandled interrupt: %d %d\n", src_id, src_data);
 			break;
 		}
 
@@ -5000,6 +5008,7 @@ static int evergreen_startup(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring;
 	int r;
+	struct drm_device *ddev = rdev_to_drm(rdev);
 
 	/* enable pcie gen2 link */
 	evergreen_pcie_gen2_enable(rdev);
@@ -5016,7 +5025,7 @@ static int evergreen_startup(struct radeon_device *rdev)
 	if (ASIC_IS_DCE5(rdev) && !rdev->pm.dpm_enabled) {
 		r = ni_mc_load_microcode(rdev);
 		if (r) {
-			DRM_ERROR("Failed to load MC firmware!\n");
+			drm_err(ddev, "Failed to load MC firmware!\n");
 			return r;
 		}
 	}
@@ -5038,7 +5047,7 @@ static int evergreen_startup(struct radeon_device *rdev)
 		rdev->rlc.cs_data = evergreen_cs_data;
 		r = sumo_rlc_init(rdev);
 		if (r) {
-			DRM_ERROR("Failed to init rlc BOs!\n");
+			drm_err(ddev, "Failed to init rlc BOs!\n");
 			return r;
 		}
 	}
@@ -5071,7 +5080,7 @@ static int evergreen_startup(struct radeon_device *rdev)
 
 	r = r600_irq_init(rdev);
 	if (r) {
-		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		drm_err(ddev, "radeon: IH init failed (%d).\n", r);
 		radeon_irq_kms_fini(rdev);
 		return r;
 	}
@@ -5109,7 +5118,7 @@ static int evergreen_startup(struct radeon_device *rdev)
 
 	r = radeon_audio_init(rdev);
 	if (r) {
-		DRM_ERROR("radeon: audio init failed\n");
+		drm_err(ddev, "radeon: audio init failed\n");
 		return r;
 	}
 
@@ -5119,6 +5128,7 @@ static int evergreen_startup(struct radeon_device *rdev)
 int evergreen_resume(struct radeon_device *rdev)
 {
 	int r;
+	struct drm_device *ddev = rdev_to_drm(rdev);
 
 	/* reset the asic, the gfx blocks are often in a bad state
 	 * after the driver is unloaded or after a resume
@@ -5141,7 +5151,7 @@ int evergreen_resume(struct radeon_device *rdev)
 	rdev->accel_working = true;
 	r = evergreen_startup(rdev);
 	if (r) {
-		DRM_ERROR("evergreen startup failed on resume\n");
+		drm_err(ddev, "evergreen startup failed on resume\n");
 		rdev->accel_working = false;
 		return r;
 	}
@@ -5176,6 +5186,7 @@ int evergreen_suspend(struct radeon_device *rdev)
 int evergreen_init(struct radeon_device *rdev)
 {
 	int r;
+	struct drm_device *ddev = rdev_to_drm(rdev);
 
 	/* Read BIOS */
 	if (!radeon_get_bios(rdev)) {
@@ -5201,7 +5212,7 @@ int evergreen_init(struct radeon_device *rdev)
 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
 			return -EINVAL;
 		}
-		DRM_INFO("GPU not posted. posting now...\n");
+		drm_info(ddev, "GPU not posted. posting now...\n");
 		atom_asic_init(rdev->mode_info.atom_context);
 	}
 	/* init golden registers */
@@ -5233,7 +5244,7 @@ int evergreen_init(struct radeon_device *rdev)
 		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
 			r = ni_init_microcode(rdev);
 			if (r) {
-				DRM_ERROR("Failed to load firmware!\n");
+				drm_err(ddev, "Failed to load firmware!\n");
 				return r;
 			}
 		}
@@ -5241,7 +5252,7 @@ int evergreen_init(struct radeon_device *rdev)
 		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
 			r = r600_init_microcode(rdev);
 			if (r) {
-				DRM_ERROR("Failed to load firmware!\n");
+				drm_err(ddev, "Failed to load firmware!\n");
 				return r;
 			}
 		}
@@ -5287,7 +5298,7 @@ int evergreen_init(struct radeon_device *rdev)
 	 */
 	if (ASIC_IS_DCE5(rdev)) {
 		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
-			DRM_ERROR("radeon: MC ucode required for NI+.\n");
+			drm_err(ddev, "radeon: MC ucode required for NI+.\n");
 			return -EINVAL;
 		}
 	}
@@ -5323,6 +5334,7 @@ void evergreen_fini(struct radeon_device *rdev)
 void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
 {
 	u32 link_width_cntl, speed_cntl;
+	struct drm_device *ddev = rdev_to_drm(rdev);
 
 	if (radeon_pcie_gen2 == 0)
 		return;
@@ -5343,11 +5355,11 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
 
 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
 	if (speed_cntl & LC_CURRENT_DATA_RATE) {
-		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+		drm_info(ddev, "PCIE gen 2 link speeds already enabled\n");
 		return;
 	}
 
-	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+	drm_info(ddev, "enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
 
 	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
 	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
-- 
2.43.0

