/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
#include <dev/drm2/radeon/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon_asic.h"
#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"
#define FIRMWARE_R100	"radeonkmsfw_R100_cp"
#define FIRMWARE_R200	"radeonkmsfw_R200_cp"
#define FIRMWARE_R300	"radeonkmsfw_R300_cp"
#define FIRMWARE_R420	"radeonkmsfw_R420_cp"
#define FIRMWARE_RS690	"radeonkmsfw_RS690_cp"
#define FIRMWARE_RS600	"radeonkmsfw_RS600_cp"
#define FIRMWARE_R520	"radeonkmsfw_R520_cp"

#include "r100_track.h"
/* This file gathers functions specific to:
 * r100, rv100, rs100, rv200, rs200, r200, rv250, rs300, rv280
 * and others in some cases.
 */
/**
 * r100_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (r1xx-r4xx).
 */
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
    if (crtc >= rdev->num_crtc)

    if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
        for (i = 0; i < rdev->usec_timeout; i++) {
            if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
        for (i = 0; i < rdev->usec_timeout; i++) {
            if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
    if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) {
        for (i = 0; i < rdev->usec_timeout; i++) {
            if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR))
        for (i = 0; i < rdev->usec_timeout; i++) {
            if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
/**
 * r100_pre_page_flip - pre-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to prepare for pageflip on
 *
 * Pre-pageflip callback (r1xx-r4xx).
 * Enables the pageflip irq (vblank irq).
 */
void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
    /* enable the pflip int */
    radeon_irq_kms_pflip_irq_get(rdev, crtc);
/**
 * r100_post_page_flip - post-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to cleanup pageflip on
 *
 * Post-pageflip callback (r1xx-r4xx).
 * Disables the pageflip irq (vblank irq).
 */
void r100_post_page_flip(struct radeon_device *rdev, int crtc)
    /* disable the pflip int */
    radeon_irq_kms_pflip_irq_put(rdev, crtc);
/**
 * r100_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (r1xx-r4xx).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high; when it does, we release the lock and allow the
 * double buffered update to take place.
 * Returns the current update pending status.
 */
u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
    struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
    u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;

    /* Lock the graphics update lock */
    /* update the scanout addresses */
    WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

    /* Wait for update_pending to go high. */
    for (i = 0; i < rdev->usec_timeout; i++) {
        if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
    DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

    /* Unlock the lock, so double-buffering can take place inside vblank */
    tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
    WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

    /* Return current update_pending status: */
    return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
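/*
 * Note (added for clarity, not in the original source): the value
 * returned above is nonzero for as long as the OFFSET update is still
 * pending, i.e. while GUI_TRIG_OFFSET reads back set.  A caller in the
 * vblank path can therefore poll this status and only report the flip
 * as completed once it reads back zero, meaning the CRTC has latched
 * the new scanout base address.
 */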
/**
 * r100_pm_get_dynpm_state - look up dynpm power state callback.
 *
 * @rdev: radeon_device pointer
 *
 * Look up the optimal power state based on the
 * current state of the GPU (r1xx-r5xx).
 * Used for dynpm only.
 */
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
    rdev->pm.dynpm_can_upclock = true;
    rdev->pm.dynpm_can_downclock = true;

    switch (rdev->pm.dynpm_planned_action) {
    case DYNPM_ACTION_MINIMUM:
        rdev->pm.requested_power_state_index = 0;
        rdev->pm.dynpm_can_downclock = false;
    case DYNPM_ACTION_DOWNCLOCK:
        if (rdev->pm.current_power_state_index == 0) {
            rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
            rdev->pm.dynpm_can_downclock = false;
            if (rdev->pm.active_crtc_count > 1) {
                for (i = 0; i < rdev->pm.num_power_states; i++) {
                    if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
                    else if (i >= rdev->pm.current_power_state_index) {
                        rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
                        rdev->pm.requested_power_state_index = i;
                rdev->pm.requested_power_state_index =
                    rdev->pm.current_power_state_index - 1;
        /* don't use the power state if crtcs are active and no display flag is set */
        if ((rdev->pm.active_crtc_count > 0) &&
            (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
             RADEON_PM_MODE_NO_DISPLAY)) {
            rdev->pm.requested_power_state_index++;
    case DYNPM_ACTION_UPCLOCK:
        if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
            rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
            rdev->pm.dynpm_can_upclock = false;
            if (rdev->pm.active_crtc_count > 1) {
                for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
                    if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
                    else if (i <= rdev->pm.current_power_state_index) {
                        rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
                        rdev->pm.requested_power_state_index = i;
                rdev->pm.requested_power_state_index =
                    rdev->pm.current_power_state_index + 1;
    case DYNPM_ACTION_DEFAULT:
        rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
        rdev->pm.dynpm_can_upclock = false;
    case DYNPM_ACTION_NONE:
251 DRM_ERROR("Requested mode for not defined action\n");
    /* only one clock mode per power state */
    rdev->pm.requested_clock_mode_index = 0;

    DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
                     rdev->pm.power_state[rdev->pm.requested_power_state_index].
                     clock_info[rdev->pm.requested_clock_mode_index].sclk,
                     rdev->pm.power_state[rdev->pm.requested_power_state_index].
                     clock_info[rdev->pm.requested_clock_mode_index].mclk,
                     rdev->pm.power_state[rdev->pm.requested_power_state_index].
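/*
 * Worked example (added for clarity, not in the original source): with
 * num_power_states = 3 and current_power_state_index = 1, a single
 * active crtc gives:
 *
 *	DYNPM_ACTION_MINIMUM   -> requested index 0 (no further downclock)
 *	DYNPM_ACTION_DOWNCLOCK -> requested index 0 (current - 1)
 *	DYNPM_ACTION_UPCLOCK   -> requested index 2 (current + 1; once at
 *	                          the top, dynpm_can_upclock is cleared)
 *	DYNPM_ACTION_DEFAULT   -> default_power_state_index
 *
 * With more than one active crtc, the scans above skip any state marked
 * RADEON_PM_STATE_SINGLE_DISPLAY_ONLY.
 */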
/**
 * r100_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode.
 * Used for profile mode only.
 */
void r100_pm_init_profile(struct radeon_device *rdev)
    rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
    rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
    rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
    rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

    rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
    rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
    rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
    rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

    rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
    rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
    rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
    rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

    rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
    rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
    rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
    rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;

    rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
    rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
    rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
    rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

    rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
    rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
    rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
    rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

    rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
    rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
    rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
    rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
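/*
 * Explanatory note (added): each profile entry pairs a power state
 * index (ps_idx) with a clock mode index (cm_idx) for the dpms-on and
 * dpms-off cases.  On r1xx the table is simple: apart from DEFAULT,
 * every profile idles (dpms off) in power state 0; only DEFAULT,
 * HIGH_SH and the *_MH profiles run displays at the default (boot-up)
 * power state; and all clock mode indices are 0 because these asics
 * expose a single clock mode per state.
 */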
/**
 * r100_pm_misc - set additional pm hw parameters callback.
 *
 * @rdev: radeon_device pointer
 *
 * Set non-clock parameters associated with a power state
 * (voltage, pcie lanes, etc.) (r1xx-r4xx).
 */
void r100_pm_misc(struct radeon_device *rdev)
    int requested_index = rdev->pm.requested_power_state_index;
    struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
    struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
    u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;

    if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
        if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
            tmp = RREG32(voltage->gpio.reg);
            if (voltage->active_high)
                tmp |= voltage->gpio.mask;
                tmp &= ~(voltage->gpio.mask);
            WREG32(voltage->gpio.reg, tmp);
                DRM_UDELAY(voltage->delay);
            tmp = RREG32(voltage->gpio.reg);
            if (voltage->active_high)
                tmp &= ~voltage->gpio.mask;
                tmp |= voltage->gpio.mask;
            WREG32(voltage->gpio.reg, tmp);
                DRM_UDELAY(voltage->delay);

    sclk_cntl = RREG32_PLL(SCLK_CNTL);
    sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
    sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
    sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
    sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
    if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
        sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
        if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
            sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
            sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
        if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
            sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
        else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
            sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
        sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;

    if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
        sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
        if (voltage->delay) {
            sclk_more_cntl |= VOLTAGE_DROP_SYNC;
            switch (voltage->delay) {
                sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
                sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
                sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
                sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
            sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
        sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;

    if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
        sclk_cntl &= ~FORCE_HDP;
        sclk_cntl |= FORCE_HDP;

    WREG32_PLL(SCLK_CNTL, sclk_cntl);
    WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
    WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);

    if ((rdev->flags & RADEON_IS_PCIE) &&
        !(rdev->flags & RADEON_IS_IGP) &&
        rdev->asic->pm.set_pcie_lanes &&
        rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
        radeon_set_pcie_lanes(rdev,
        DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
/**
 * r100_pm_prepare - pre-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Prepare for a power state change (r1xx-r4xx).
 */
void r100_pm_prepare(struct radeon_device *rdev)
    struct drm_device *ddev = rdev->ddev;
    struct drm_crtc *crtc;
    struct radeon_crtc *radeon_crtc;

    /* disable any active CRTCs */
    list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
        radeon_crtc = to_radeon_crtc(crtc);
        if (radeon_crtc->enabled) {
            if (radeon_crtc->crtc_id) {
                tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
                tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
                WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
                tmp = RREG32(RADEON_CRTC_GEN_CNTL);
                tmp |= RADEON_CRTC_DISP_REQ_EN_B;
                WREG32(RADEON_CRTC_GEN_CNTL, tmp);
/**
 * r100_pm_finish - post-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Clean up after a power state change (r1xx-r4xx).
 */
void r100_pm_finish(struct radeon_device *rdev)
    struct drm_device *ddev = rdev->ddev;
    struct drm_crtc *crtc;
    struct radeon_crtc *radeon_crtc;

    /* enable any active CRTCs */
    list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
        radeon_crtc = to_radeon_crtc(crtc);
        if (radeon_crtc->enabled) {
            if (radeon_crtc->crtc_id) {
                tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
                tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
                WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
                tmp = RREG32(RADEON_CRTC_GEN_CNTL);
                tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
                WREG32(RADEON_CRTC_GEN_CNTL, tmp);
/**
 * r100_gui_idle - gui idle callback.
 *
 * @rdev: radeon_device pointer
 *
 * Check if the GUI (2D/3D engines) is idle (r1xx-r5xx).
 * Returns true if idle, false if not.
 */
bool r100_gui_idle(struct radeon_device *rdev)
    if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
/* hpd for digital panel detect/disconnect */
/**
 * r100_hpd_sense - hpd sense callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (r1xx-r4xx).
 * Returns true if connected, false if not connected.
 */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
    bool connected = false;

        if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
        if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
/**
 * r100_hpd_set_polarity - hpd set polarity callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (r1xx-r4xx).
 */
void r100_hpd_set_polarity(struct radeon_device *rdev,
                           enum radeon_hpd_id hpd)
    bool connected = r100_hpd_sense(rdev, hpd);

        tmp = RREG32(RADEON_FP_GEN_CNTL);
            tmp &= ~RADEON_FP_DETECT_INT_POL;
            tmp |= RADEON_FP_DETECT_INT_POL;
        WREG32(RADEON_FP_GEN_CNTL, tmp);
        tmp = RREG32(RADEON_FP2_GEN_CNTL);
            tmp &= ~RADEON_FP2_DETECT_INT_POL;
            tmp |= RADEON_FP2_DETECT_INT_POL;
        WREG32(RADEON_FP2_GEN_CNTL, tmp);
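/*
 * Note (added): the polarity is programmed from the current sense
 * result, so the detect interrupt fires on a *change* of state: with a
 * panel connected the INT_POL bit is cleared to catch the unplug edge,
 * and with nothing connected it is set to catch the plug-in edge.
 */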
/**
 * r100_hpd_init - hpd setup callback.
 *
 * @rdev: radeon_device pointer
 *
 * Setup the hpd pins used by the card (r1xx-r4xx).
 * Set the polarity, and enable the hpd interrupts.
 */
void r100_hpd_init(struct radeon_device *rdev)
    struct drm_device *dev = rdev->ddev;
    struct drm_connector *connector;

    list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        enable |= 1 << radeon_connector->hpd.hpd;
        radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
    radeon_irq_kms_enable_hpd(rdev, enable);
/**
 * r100_hpd_fini - hpd tear down callback.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the hpd pins used by the card (r1xx-r4xx).
 * Disable the hpd interrupts.
 */
void r100_hpd_fini(struct radeon_device *rdev)
    struct drm_device *dev = rdev->ddev;
    struct drm_connector *connector;
    unsigned disable = 0;

    list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        disable |= 1 << radeon_connector->hpd.hpd;
    radeon_irq_kms_disable_hpd(rdev, disable);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
    /* TODO: can we do something here? */
    /* The hardware seems to cache only one entry, so we should discard
     * it here; otherwise the first GPU GART read to hit that entry
     * could end up at the wrong address. */
}
int r100_pci_gart_init(struct radeon_device *rdev)
    if (rdev->gart.ptr) {
        DRM_ERROR("R100 PCI GART already initialized\n");
    /* Initialize common gart structure */
    r = radeon_gart_init(rdev);
    rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
    rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
    rdev->asic->gart.set_page = &r100_pci_gart_set_page;
    return radeon_gart_table_ram_alloc(rdev);
int r100_pci_gart_enable(struct radeon_device *rdev)
    radeon_gart_restore(rdev);
    /* discard memory request outside of configured range */
    tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
    WREG32(RADEON_AIC_CNTL, tmp);
    /* set address range for PCI address translate */
    WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
    WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
    /* set PCI GART page-table base address */
    WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
    tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
    WREG32(RADEON_AIC_CNTL, tmp);
    r100_pci_gart_tlb_flush(rdev);
    DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
             (unsigned)(rdev->mc.gtt_size >> 20),
             (unsigned long long)rdev->gart.table_addr);
    rdev->gart.ready = true;
void r100_pci_gart_disable(struct radeon_device *rdev)
    /* discard memory request outside of configured range */
    tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
    WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
    WREG32(RADEON_AIC_LO_ADDR, 0);
    WREG32(RADEON_AIC_HI_ADDR, 0);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
    u32 *gtt = rdev->gart.ptr;

    /* valid page indices are 0 .. num_gpu_pages - 1 */
    if (i < 0 || i >= rdev->gart.num_gpu_pages) {
    gtt[i] = cpu_to_le32(lower_32_bits(addr));
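/*
 * Worked example (added): the table is a flat array of 4-byte,
 * little-endian entries (table_size = num_gpu_pages * 4 above), one per
 * 4 KiB GPU page.  Entry i lives at table_addr + 4 * i and holds the
 * low 32 bits of the backing system page, so GPU address
 * gtt_start + i * 4096 is translated to the page programmed here.
 */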
void r100_pci_gart_fini(struct radeon_device *rdev)
    radeon_gart_fini(rdev);
    r100_pci_gart_disable(rdev);
    radeon_gart_table_ram_free(rdev);
int r100_irq_set(struct radeon_device *rdev)
    if (!rdev->irq.installed) {
        DRM_ERROR("Can't enable IRQ/MSI because no handler is installed\n");
        WREG32(R_000040_GEN_INT_CNTL, 0);
    if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
        tmp |= RADEON_SW_INT_ENABLE;
    if (rdev->irq.crtc_vblank_int[0] ||
        atomic_read(&rdev->irq.pflip[0])) {
        tmp |= RADEON_CRTC_VBLANK_MASK;
    if (rdev->irq.crtc_vblank_int[1] ||
        atomic_read(&rdev->irq.pflip[1])) {
        tmp |= RADEON_CRTC2_VBLANK_MASK;
    if (rdev->irq.hpd[0]) {
        tmp |= RADEON_FP_DETECT_MASK;
    if (rdev->irq.hpd[1]) {
        tmp |= RADEON_FP2_DETECT_MASK;
    WREG32(RADEON_GEN_INT_CNTL, tmp);
void r100_irq_disable(struct radeon_device *rdev)
    WREG32(R_000040_GEN_INT_CNTL, 0);
    /* Wait and acknowledge irq */
    tmp = RREG32(R_000044_GEN_INT_STATUS);
    WREG32(R_000044_GEN_INT_STATUS, tmp);
static uint32_t r100_irq_ack(struct radeon_device *rdev)
    uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
    uint32_t irq_mask = RADEON_SW_INT_TEST |
        RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
        RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;

        WREG32(RADEON_GEN_INT_STATUS, irqs);
    return irqs & irq_mask;
irqreturn_t r100_irq_process(struct radeon_device *rdev)
    uint32_t status, msi_rearm;
    bool queue_hotplug = false;

    status = r100_irq_ack(rdev);
    if (rdev->shutdown) {
        if (status & RADEON_SW_INT_TEST) {
            radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
        /* Vertical blank interrupts */
        if (status & RADEON_CRTC_VBLANK_STAT) {
            if (rdev->irq.crtc_vblank_int[0]) {
                drm_handle_vblank(rdev->ddev, 0);
                rdev->pm.vblank_sync = true;
                DRM_WAKEUP(&rdev->irq.vblank_queue);
            if (atomic_read(&rdev->irq.pflip[0]))
                radeon_crtc_handle_flip(rdev, 0);
        if (status & RADEON_CRTC2_VBLANK_STAT) {
            if (rdev->irq.crtc_vblank_int[1]) {
                drm_handle_vblank(rdev->ddev, 1);
                rdev->pm.vblank_sync = true;
                DRM_WAKEUP(&rdev->irq.vblank_queue);
            if (atomic_read(&rdev->irq.pflip[1]))
                radeon_crtc_handle_flip(rdev, 1);
        if (status & RADEON_FP_DETECT_STAT) {
            queue_hotplug = true;
        if (status & RADEON_FP2_DETECT_STAT) {
            queue_hotplug = true;
        status = r100_irq_ack(rdev);
        taskqueue_enqueue(rdev->tq, &rdev->hotplug_work);
    if (rdev->msi_enabled) {
        switch (rdev->family) {
            msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
            WREG32(RADEON_AIC_CNTL, msi_rearm);
            WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
            WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
        return RREG32(RADEON_CRTC_CRNT_FRAME);
        return RREG32(RADEON_CRTC2_CRNT_FRAME);
/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today the callers are the ib scheduler and buffer moves) */
void r100_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
    struct radeon_ring *ring = &rdev->ring[fence->ring];

    /* We have to make sure that caches are flushed before the
     * CPU might read something from VRAM. */
    radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
    radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
    radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
    radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
    /* Wait until IDLE & CLEAN */
    radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
    radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
    radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
    radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
                            RADEON_HDP_READ_BUFFER_INVALIDATE);
    radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
    radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
    /* Emit fence sequence & fire IRQ */
    radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
    radeon_ring_write(ring, fence->seq);
    radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
    radeon_ring_write(ring, RADEON_SW_INT_FIRE);
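/*
 * Explanatory note (added): the sequence above is the whole r1xx fence
 * protocol.  Each PACKET0(reg, 0) header makes the CP write the single
 * dword that follows into "reg", so the stream flushes the destination
 * and Z caches, waits for the 2D/3D engines to go idle and clean,
 * invalidates the HDP read buffer, stores fence->seq into the fence
 * scratch register, and finally raises a software interrupt so the
 * host notices the new sequence number.
 */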
void r100_semaphore_ring_emit(struct radeon_device *rdev,
                              struct radeon_ring *ring,
                              struct radeon_semaphore *semaphore,
    /* Unused on older asics, since we don't have semaphores or multiple rings */
    panic("%s: Unused on older asics", __func__);
int r100_copy_blit(struct radeon_device *rdev,
                   unsigned num_gpu_pages,
                   struct radeon_fence **fence)
    struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
    uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
    uint32_t stride_pixels;

    /* radeon limited to 16k stride */
    stride_bytes &= 0x3fff;
    /* radeon pitch is /64 */
    pitch = stride_bytes / 64;
    stride_pixels = stride_bytes / 4;
    num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);

    /* Ask for enough room for blit + flush + fence */
    ndw = 64 + (10 * num_loops);
    r = radeon_ring_lock(rdev, ring, ndw);
        DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
    while (num_gpu_pages > 0) {
        cur_pages = num_gpu_pages;
        if (cur_pages > 8191) {
        num_gpu_pages -= cur_pages;

        /* pages are laid out in the Y direction (height);
         * page width is the X direction (width) */
        radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
        radeon_ring_write(ring,
                          RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
                          RADEON_GMC_DST_PITCH_OFFSET_CNTL |
                          RADEON_GMC_SRC_CLIPPING |
                          RADEON_GMC_DST_CLIPPING |
                          RADEON_GMC_BRUSH_NONE |
                          (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
                          RADEON_GMC_SRC_DATATYPE_COLOR |
                          RADEON_DP_SRC_SOURCE_MEMORY |
                          RADEON_GMC_CLR_CMP_CNTL_DIS |
                          RADEON_GMC_WR_MSK_DIS);
        radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
        radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
        radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
        radeon_ring_write(ring, num_gpu_pages);
        radeon_ring_write(ring, num_gpu_pages);
        radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
    radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
    radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
    radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
    radeon_ring_write(ring,
                      RADEON_WAIT_2D_IDLECLEAN |
                      RADEON_WAIT_HOST_IDLECLEAN |
                      RADEON_WAIT_DMA_GUI_IDLE);
    r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
    radeon_ring_unlock_commit(rdev, ring);
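/*
 * Worked example (added): one BITBLT_MULTI packet moves at most 8191
 * GPU pages, so for e.g. num_gpu_pages = 20000 the code above runs
 * DIV_ROUND_UP(20000, 8191) = 3 loops.  Each loop costs 10 ring dwords
 * (the PACKET3 header plus 9 payload dwords), and the extra 64 dwords
 * of headroom cover the trailing cache flush, wait-until and fence,
 * giving ndw = 64 + 10 * 3 = 94 for this transfer.
 */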
static int r100_cp_wait_for_idle(struct radeon_device *rdev)
    for (i = 0; i < rdev->usec_timeout; i++) {
        tmp = RREG32(R_000E40_RBBM_STATUS);
        if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
    r = radeon_ring_lock(rdev, ring, 2);
    radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
    radeon_ring_write(ring,
                      RADEON_ISYNC_ANY2D_IDLE3D |
                      RADEON_ISYNC_ANY3D_IDLE2D |
                      RADEON_ISYNC_WAIT_IDLEGUI |
                      RADEON_ISYNC_CPSCRATCH_IDLEGUI);
    radeon_ring_unlock_commit(rdev, ring);
/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
    const char *fw_name = NULL;

    if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
        (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
        (rdev->family == CHIP_RS200)) {
        DRM_INFO("Loading R100 Microcode\n");
        fw_name = FIRMWARE_R100;
    } else if ((rdev->family == CHIP_R200) ||
               (rdev->family == CHIP_RV250) ||
               (rdev->family == CHIP_RV280) ||
               (rdev->family == CHIP_RS300)) {
        DRM_INFO("Loading R200 Microcode\n");
        fw_name = FIRMWARE_R200;
    } else if ((rdev->family == CHIP_R300) ||
               (rdev->family == CHIP_R350) ||
               (rdev->family == CHIP_RV350) ||
               (rdev->family == CHIP_RV380) ||
               (rdev->family == CHIP_RS400) ||
               (rdev->family == CHIP_RS480)) {
        DRM_INFO("Loading R300 Microcode\n");
        fw_name = FIRMWARE_R300;
    } else if ((rdev->family == CHIP_R420) ||
               (rdev->family == CHIP_R423) ||
               (rdev->family == CHIP_RV410)) {
        DRM_INFO("Loading R400 Microcode\n");
        fw_name = FIRMWARE_R420;
    } else if ((rdev->family == CHIP_RS690) ||
               (rdev->family == CHIP_RS740)) {
        DRM_INFO("Loading RS690/RS740 Microcode\n");
        fw_name = FIRMWARE_RS690;
    } else if (rdev->family == CHIP_RS600) {
        DRM_INFO("Loading RS600 Microcode\n");
        fw_name = FIRMWARE_RS600;
    } else if ((rdev->family == CHIP_RV515) ||
               (rdev->family == CHIP_R520) ||
               (rdev->family == CHIP_RV530) ||
               (rdev->family == CHIP_R580) ||
               (rdev->family == CHIP_RV560) ||
               (rdev->family == CHIP_RV570)) {
        DRM_INFO("Loading R500 Microcode\n");
        fw_name = FIRMWARE_R520;

    rdev->me_fw = firmware_get(fw_name);
    if (rdev->me_fw == NULL) {
        DRM_ERROR("radeon_cp: Failed to load firmware \"%s\"\n",
    } else if (rdev->me_fw->datasize % 8) {
            "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
            rdev->me_fw->datasize, fw_name);
        firmware_put(rdev->me_fw, FIRMWARE_UNLOAD);
/**
 * r100_cp_fini_microcode - drop the firmware image reference
 *
 * @rdev: radeon_device pointer
 *
 * Drop the me firmware image reference.
 * Called at driver shutdown.
 */
static void r100_cp_fini_microcode(struct radeon_device *rdev)
    if (rdev->me_fw != NULL) {
        firmware_put(rdev->me_fw, FIRMWARE_UNLOAD);
static void r100_cp_load_microcode(struct radeon_device *rdev)
    const __be32 *fw_data;

    if (r100_gui_wait_for_idle(rdev)) {
        DRM_ERROR("Failed to wait GUI idle while "
                  "programming pipes. Bad things might happen.\n");

    size = rdev->me_fw->datasize / 4;
    fw_data = (const __be32 *)rdev->me_fw->data;
    WREG32(RADEON_CP_ME_RAM_ADDR, 0);
    for (i = 0; i < size; i += 2) {
        WREG32(RADEON_CP_ME_RAM_DATAH,
               be32_to_cpup(&fw_data[i]));
        WREG32(RADEON_CP_ME_RAM_DATAL,
               be32_to_cpup(&fw_data[i + 1]));
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
    struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
    unsigned pre_write_timer;
    unsigned pre_write_limit;
    unsigned indirect2_start;
    unsigned indirect1_start;

    if (r100_debugfs_cp_init(rdev)) {
        DRM_ERROR("Failed to register debugfs file for CP !\n");
    r = r100_cp_init_microcode(rdev);
        DRM_ERROR("Failed to load firmware!\n");

    /* Align ring size */
    rb_bufsz = drm_order(ring_size / 8);
    ring_size = (1 << (rb_bufsz + 1)) * 4;
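    /*
     * Worked example (added): drm_order() returns the base-2 logarithm
     * rounded up, so a requested ring_size of 1 MiB gives
     * rb_bufsz = drm_order(1048576 / 8) = 17, and the ring size is
     * re-derived as (1 << 18) * 4 = 1 MiB again; a non-power-of-two
     * request such as 1000000 bytes rounds up the same way to 1 MiB.
     */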
    r100_cp_load_microcode(rdev);
    r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
                         RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
                         0, 0x7fffff, RADEON_CP_PACKET2);
    /* Each time the cp reads 1024 bytes (16 dwords/quadwords), update
     * the rptr copy in system ram */
    /* the cp will read 128 bytes at a time (4 dwords) */
    ring->align_mask = 16 - 1;
    /* Writes to CP_RB_WPTR are delayed for pre_write_timer clocks */
    pre_write_timer = 64;
    /* Force a CP_RB_WPTR write if written more than once before the
    pre_write_limit = 0;
    /* Setup the cp cache like this (cache size is 96 dwords):
     * INDIRECT1 16 to 79
     * INDIRECT2 80 to 95
     * So the ring cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)),
     * the indirect1 cache size is 64 dwords (> (2 * max_fetch = 2 * 4 dwords)),
     * the indirect2 cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)).
     * The idea being that most of the gpu cmd will go through the indirect1
     * buffer, so it gets the bigger cache.
     */
    indirect2_start = 80;
    indirect1_start = 16;
    WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
    tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
           REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
           REG_SET(RADEON_MAX_FETCH, max_fetch));
        tmp |= RADEON_BUF_SWAP_32BIT;
    WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);

    /* Set ring address */
    DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
    WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
    /* Force read & write ptr to 0 */
    WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
    WREG32(RADEON_CP_RB_RPTR_WR, 0);
    WREG32(RADEON_CP_RB_WPTR, ring->wptr);

    /* set the wb address whether it's enabled or not */
    WREG32(R_00070C_CP_RB_RPTR_ADDR,
           S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
    WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);

    if (rdev->wb.enabled)
        WREG32(R_000770_SCRATCH_UMSK, 0xff);
        tmp |= RADEON_RB_NO_UPDATE;
        WREG32(R_000770_SCRATCH_UMSK, 0);

    WREG32(RADEON_CP_RB_CNTL, tmp);
    ring->rptr = RREG32(RADEON_CP_RB_RPTR);
    /* Set cp mode to bus mastering & enable cp */
    WREG32(RADEON_CP_CSQ_MODE,
           REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
           REG_SET(RADEON_INDIRECT1_START, indirect1_start));
    WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
    WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
    WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
    /* at this point everything should be set up correctly to enable master */
    pci_enable_busmaster(rdev->dev);

    radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
    r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
        DRM_ERROR("radeon: cp isn't working (%d).\n", r);
    radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

    if (!ring->rptr_save_reg /* not resuming from suspend */
        && radeon_ring_supports_scratch_reg(rdev, ring)) {
        r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
            DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
            ring->rptr_save_reg = 0;
void r100_cp_fini(struct radeon_device *rdev)
    if (r100_cp_wait_for_idle(rdev)) {
        DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
    r100_cp_disable(rdev);
    radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
    radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
    DRM_INFO("radeon: cp finalized\n");
void r100_cp_disable(struct radeon_device *rdev)
    radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
    rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
    WREG32(RADEON_CP_CSQ_MODE, 0);
    WREG32(RADEON_CP_CSQ_CNTL, 0);
    WREG32(R_000770_SCRATCH_UMSK, 0);
    if (r100_gui_wait_for_idle(rdev)) {
        DRM_ERROR("Failed to wait GUI idle while "
                  "programming pipes. Bad things might happen.\n");
int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
                            struct radeon_cs_packet *pkt,
    struct radeon_cs_reloc *reloc;

    r = r100_cs_packet_next_reloc(p, &reloc);
        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
        r100_cs_dump_packet(p, pkt);

    value = radeon_get_ib_value(p, idx);
    tmp = value & 0x003fffff;
    tmp += (((u32)reloc->lobj.gpu_offset) >> 10);

    if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
            tile_flags |= RADEON_DST_TILE_MACRO;
        if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
            if (reg == RADEON_SRC_PITCH_OFFSET) {
                DRM_ERROR("Cannot src blit from microtiled surface\n");
                r100_cs_dump_packet(p, pkt);
            tile_flags |= RADEON_DST_TILE_MICRO;

        p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
        p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
                             struct radeon_cs_packet *pkt,
    struct radeon_cs_reloc *reloc;
    struct r100_cs_track *track;
    volatile uint32_t *ib;

    track = (struct r100_cs_track *)p->track;
    c = radeon_get_ib_value(p, idx++) & 0x1F;
        DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
        r100_cs_dump_packet(p, pkt);
    track->num_arrays = c;
    for (i = 0; i < (c - 1); i += 2, idx += 3) {
        r = r100_cs_packet_next_reloc(p, &reloc);
            DRM_ERROR("No reloc for packet3 %d\n",
            r100_cs_dump_packet(p, pkt);
        idx_value = radeon_get_ib_value(p, idx);
        ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);

        track->arrays[i + 0].esize = idx_value >> 8;
        track->arrays[i + 0].robj = reloc->robj;
        track->arrays[i + 0].esize &= 0x7F;
        r = r100_cs_packet_next_reloc(p, &reloc);
            DRM_ERROR("No reloc for packet3 %d\n",
            r100_cs_dump_packet(p, pkt);
        ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
        track->arrays[i + 1].robj = reloc->robj;
        track->arrays[i + 1].esize = idx_value >> 24;
        track->arrays[i + 1].esize &= 0x7F;

        r = r100_cs_packet_next_reloc(p, &reloc);
            DRM_ERROR("No reloc for packet3 %d\n",
            r100_cs_dump_packet(p, pkt);
        idx_value = radeon_get_ib_value(p, idx);
        ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
        track->arrays[i + 0].robj = reloc->robj;
        track->arrays[i + 0].esize = idx_value >> 8;
        track->arrays[i + 0].esize &= 0x7F;
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
                          struct radeon_cs_packet *pkt,
                          const unsigned *auth, unsigned n,
                          radeon_packet0_check_t check)
    /* Check that the register falls into the register range
     * determined by the number of entries (n) in the
     * safe register bitmap.
     */
    if (pkt->one_reg_wr) {
        if ((reg >> 7) > n) {
        if (((reg + (pkt->count << 2)) >> 7) > n) {
    for (i = 0; i <= pkt->count; i++, idx++) {
        m = 1 << ((reg >> 2) & 31);
            r = check(p, pkt, idx, reg);
        if (pkt->one_reg_wr) {
            if (!(auth[j] & m)) {
void r100_cs_dump_packet(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt)
    volatile uint32_t *ib;

    for (i = 0; i <= (pkt->count + 1); i++, idx++) {
        DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
/**
 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser: parser structure holding parsing context.
 * @pkt: where to store packet information
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet is unknown.
 */
int r100_cs_packet_parse(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt,
    struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];

    if (idx >= ib_chunk->length_dw) {
        DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
                  idx, ib_chunk->length_dw);
    header = radeon_get_ib_value(p, idx);
    pkt->type = CP_PACKET_GET_TYPE(header);
    pkt->count = CP_PACKET_GET_COUNT(header);
    switch (pkt->type) {
        pkt->reg = CP_PACKET0_GET_REG(header);
        pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
        pkt->opcode = CP_PACKET3_GET_OPCODE(header);
        DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
    if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
        DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
                  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
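/*
 * Worked example (added, per the usual radeon CP header layout assumed
 * by the CP_PACKET_GET_* macros above): bits 31:30 of the header hold
 * the packet type and bits 29:16 the count (dwords following, minus
 * one); a type-0 header carries the starting register dword index in
 * its low bits, while a type-3 header keeps its opcode in bits 15:8:
 *
 *	type   = (header >> 30) & 0x3;
 *	count  = (header >> 16) & 0x3fff;
 *	opcode = (header >> 8) & 0xff;	// PACKET_TYPE3 only
 *
 * So header 0xC0012300 is type 3, count 1 (two payload dwords), opcode 0x23.
 */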
/**
 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser: parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched-off crtc and nulls out the
 * wait in that case.
 */
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
    struct drm_mode_object *obj;
    struct drm_crtc *crtc;
    struct radeon_crtc *radeon_crtc;
    struct radeon_cs_packet p3reloc, waitreloc;
    uint32_t header, h_idx, reg;
    volatile uint32_t *ib;

    /* parse the wait until */
    r = r100_cs_packet_parse(p, &waitreloc, p->idx);

    /* check it's a wait until, with only 1 count */
    if (waitreloc.reg != RADEON_WAIT_UNTIL ||
        waitreloc.count != 0) {
        DRM_ERROR("vline wait had illegal wait until segment\n");

    if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
        DRM_ERROR("vline wait had illegal wait until\n");
    /* jump over the NOP */
    r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);

    p->idx += waitreloc.count + 2;
    p->idx += p3reloc.count + 2;

    header = radeon_get_ib_value(p, h_idx);
    crtc_id = radeon_get_ib_value(p, h_idx + 5);
    reg = CP_PACKET0_GET_REG(header);
    obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
        DRM_ERROR("cannot find crtc %d\n", crtc_id);
    crtc = obj_to_crtc(obj);
    radeon_crtc = to_radeon_crtc(crtc);
    crtc_id = radeon_crtc->crtc_id;

    if (!crtc->enabled) {
        /* if the CRTC isn't enabled - we need to nop out the wait until */
        ib[h_idx + 2] = PACKET2(0);
        ib[h_idx + 3] = PACKET2(0);
    } else if (crtc_id == 1) {
        case AVIVO_D1MODE_VLINE_START_END:
            header &= ~R300_CP_PACKET0_REG_MASK;
            header |= AVIVO_D2MODE_VLINE_START_END >> 2;
        case RADEON_CRTC_GUI_TRIG_VLINE:
            header &= ~R300_CP_PACKET0_REG_MASK;
            header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
            DRM_ERROR("unknown crtc reloc\n");
        ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
/**
 * r100_cs_packet_next_reloc() - parse next packet which should be a reloc packet3
 * @parser: parser structure holding parsing context.
 * @data: pointer to relocation data
 * @offset_start: starting offset
 * @offset_mask: offset mask (to align start offset on)
 * @reloc: reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation and
 * compute the GPU offset using the provided start.
 */
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
                              struct radeon_cs_reloc **cs_reloc)
    struct radeon_cs_chunk *relocs_chunk;
    struct radeon_cs_packet p3reloc;

    if (p->chunk_relocs_idx == -1) {
        DRM_ERROR("No relocation chunk !\n");
    relocs_chunk = &p->chunks[p->chunk_relocs_idx];
    r = r100_cs_packet_parse(p, &p3reloc, p->idx);
    p->idx += p3reloc.count + 2;
    if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
        DRM_ERROR("No packet3 for relocation for packet at %d.\n",
        r100_cs_dump_packet(p, &p3reloc);
    idx = radeon_get_ib_value(p, p3reloc.idx + 1);
    if (idx >= relocs_chunk->length_dw) {
        DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                  idx, relocs_chunk->length_dw);
        r100_cs_dump_packet(p, &p3reloc);
    /* FIXME: we assume reloc size is 4 dwords */
    *cs_reloc = p->relocs_ptr[(idx / 4)];
static int r100_get_vtx_size(uint32_t vtx_fmt)
    /* ordered according to bits in spec */
    if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
    if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
    if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
    if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
    if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
    if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
    if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
    if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
    if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
    if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
    if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
    if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
    if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
    if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
    if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)

    if (vtx_fmt & (0x7 << 15))
        vtx_size += (vtx_fmt >> 15) & 0x7;
    if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
    if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
    if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
    if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
    if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
    if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
static int r100_packet0_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt,
                              unsigned idx, unsigned reg)
    struct radeon_cs_reloc *reloc;
    struct r100_cs_track *track;
    volatile uint32_t *ib;

    track = (struct r100_cs_track *)p->track;

    idx_value = radeon_get_ib_value(p, idx);

    case RADEON_CRTC_GUI_TRIG_VLINE:
        r = r100_cs_packet_parse_vline(p);
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
            r100_cs_dump_packet(p, pkt);
    /* FIXME: only allow PACKET3 blit? easier to check for out of
    case RADEON_DST_PITCH_OFFSET:
    case RADEON_SRC_PITCH_OFFSET:
        r = r100_reloc_pitch_offset(p, pkt, idx, reg);
    case RADEON_RB3D_DEPTHOFFSET:
        r = r100_cs_packet_next_reloc(p, &reloc);
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
            r100_cs_dump_packet(p, pkt);
        track->zb.robj = reloc->robj;
        track->zb.offset = idx_value;
        track->zb_dirty = true;
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
    case RADEON_RB3D_COLOROFFSET:
        r = r100_cs_packet_next_reloc(p, &reloc);
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
            r100_cs_dump_packet(p, pkt);
        track->cb[0].robj = reloc->robj;
        track->cb[0].offset = idx_value;
        track->cb_dirty = true;
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
    case RADEON_PP_TXOFFSET_0:
    case RADEON_PP_TXOFFSET_1:
    case RADEON_PP_TXOFFSET_2:
        i = (reg - RADEON_PP_TXOFFSET_0) / 24;
        r = r100_cs_packet_next_reloc(p, &reloc);
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
            r100_cs_dump_packet(p, pkt);
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
            if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                tile_flags |= RADEON_TXO_MACRO_TILE;
            if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                tile_flags |= RADEON_TXO_MICRO_TILE_X2;

            tmp = idx_value & ~(0x7 << 2);
            ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
            ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        track->textures[i].robj = reloc->robj;
        track->tex_dirty = true;
    case RADEON_PP_CUBIC_OFFSET_T0_0:
    case RADEON_PP_CUBIC_OFFSET_T0_1:
    case RADEON_PP_CUBIC_OFFSET_T0_2:
    case RADEON_PP_CUBIC_OFFSET_T0_3:
    case RADEON_PP_CUBIC_OFFSET_T0_4:
        i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
        r = r100_cs_packet_next_reloc(p, &reloc);
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
            r100_cs_dump_packet(p, pkt);
        track->textures[0].cube_info[i].offset = idx_value;
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        track->textures[0].cube_info[i].robj = reloc->robj;
        track->tex_dirty = true;
    case RADEON_PP_CUBIC_OFFSET_T1_0:
    case RADEON_PP_CUBIC_OFFSET_T1_1:
    case RADEON_PP_CUBIC_OFFSET_T1_2:
    case RADEON_PP_CUBIC_OFFSET_T1_3:
    case RADEON_PP_CUBIC_OFFSET_T1_4:
        i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
        r = r100_cs_packet_next_reloc(p, &reloc);
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
            r100_cs_dump_packet(p, pkt);
        track->textures[1].cube_info[i].offset = idx_value;
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        track->textures[1].cube_info[i].robj = reloc->robj;
        track->tex_dirty = true;
    case RADEON_PP_CUBIC_OFFSET_T2_0:
    case RADEON_PP_CUBIC_OFFSET_T2_1:
    case RADEON_PP_CUBIC_OFFSET_T2_2:
    case RADEON_PP_CUBIC_OFFSET_T2_3:
    case RADEON_PP_CUBIC_OFFSET_T2_4:
        i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
        r = r100_cs_packet_next_reloc(p, &reloc);
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
            r100_cs_dump_packet(p, pkt);
        track->textures[2].cube_info[i].offset = idx_value;
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        track->textures[2].cube_info[i].robj = reloc->robj;
        track->tex_dirty = true;
    case RADEON_RE_WIDTH_HEIGHT:
        track->maxy = ((idx_value >> 16) & 0x7FF);
        track->cb_dirty = true;
        track->zb_dirty = true;
    case RADEON_RB3D_COLORPITCH:
        r = r100_cs_packet_next_reloc(p, &reloc);
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
            r100_cs_dump_packet(p, pkt);
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
            if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                tile_flags |= RADEON_COLOR_TILE_ENABLE;
            if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

            tmp = idx_value & ~(0x7 << 16);
            ib[idx] = idx_value;

        track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
        track->cb_dirty = true;
    case RADEON_RB3D_DEPTHPITCH:
        track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
        track->zb_dirty = true;
    case RADEON_RB3D_CNTL:
        switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
            track->cb[0].cpp = 1;
            track->cb[0].cpp = 2;
            track->cb[0].cpp = 4;
            DRM_ERROR("Invalid color buffer format (%d) !\n",
                      ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
        track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
        track->cb_dirty = true;
        track->zb_dirty = true;
    case RADEON_RB3D_ZSTENCILCNTL:
        switch (idx_value & 0xf) {
        track->zb_dirty = true;
    case RADEON_RB3D_ZPASS_ADDR:
        r = r100_cs_packet_next_reloc(p, &reloc);
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
            r100_cs_dump_packet(p, pkt);
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
    case RADEON_PP_CNTL:
        uint32_t temp = idx_value >> 4;
        for (i = 0; i < track->num_texture; i++)
            track->textures[i].enabled = !!(temp & (1 << i));
        track->tex_dirty = true;
    case RADEON_SE_VF_CNTL:
        track->vap_vf_cntl = idx_value;
    case RADEON_SE_VTX_FMT:
        track->vtx_size = r100_get_vtx_size(idx_value);
    case RADEON_PP_TEX_SIZE_0:
    case RADEON_PP_TEX_SIZE_1:
    case RADEON_PP_TEX_SIZE_2:
        i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
        track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
        track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
        track->tex_dirty = true;
    case RADEON_PP_TEX_PITCH_0:
    case RADEON_PP_TEX_PITCH_1:
    case RADEON_PP_TEX_PITCH_2:
        i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
        track->textures[i].pitch = idx_value + 32;
        track->tex_dirty = true;
    case RADEON_PP_TXFILTER_0:
    case RADEON_PP_TXFILTER_1:
    case RADEON_PP_TXFILTER_2:
        i = (reg - RADEON_PP_TXFILTER_0) / 24;
        track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
                                         >> RADEON_MAX_MIP_LEVEL_SHIFT);
        tmp = (idx_value >> 23) & 0x7;
        if (tmp == 2 || tmp == 6)
            track->textures[i].roundup_w = false;
        tmp = (idx_value >> 27) & 0x7;
        if (tmp == 2 || tmp == 6)
            track->textures[i].roundup_h = false;
        track->tex_dirty = true;
    case RADEON_PP_TXFORMAT_0:
    case RADEON_PP_TXFORMAT_1:
    case RADEON_PP_TXFORMAT_2:
        i = (reg - RADEON_PP_TXFORMAT_0) / 24;
        if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
            track->textures[i].use_pitch = 1;
            track->textures[i].use_pitch = 0;
            track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
            track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);

        if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
            track->textures[i].tex_coord_type = 2;
        switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
        case RADEON_TXFORMAT_I8:
        case RADEON_TXFORMAT_RGB332:
        case RADEON_TXFORMAT_Y8:
            track->textures[i].cpp = 1;
            track->textures[i].compress_format = R100_TRACK_COMP_NONE;
        case RADEON_TXFORMAT_AI88:
        case RADEON_TXFORMAT_ARGB1555:
        case RADEON_TXFORMAT_RGB565:
        case RADEON_TXFORMAT_ARGB4444:
        case RADEON_TXFORMAT_VYUY422:
        case RADEON_TXFORMAT_YVYU422:
        case RADEON_TXFORMAT_SHADOW16:
        case RADEON_TXFORMAT_LDUDV655:
        case RADEON_TXFORMAT_DUDV88:
            track->textures[i].cpp = 2;
            track->textures[i].compress_format = R100_TRACK_COMP_NONE;
        case RADEON_TXFORMAT_ARGB8888:
        case RADEON_TXFORMAT_RGBA8888:
        case RADEON_TXFORMAT_SHADOW32:
        case RADEON_TXFORMAT_LDUDUV8888:
            track->textures[i].cpp = 4;
            track->textures[i].compress_format = R100_TRACK_COMP_NONE;
        case RADEON_TXFORMAT_DXT1:
            track->textures[i].cpp = 1;
            track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
        case RADEON_TXFORMAT_DXT23:
        case RADEON_TXFORMAT_DXT45:
            track->textures[i].cpp = 1;
            track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
        track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
        track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
        track->tex_dirty = true;
    case RADEON_PP_CUBIC_FACES_0:
    case RADEON_PP_CUBIC_FACES_1:
    case RADEON_PP_CUBIC_FACES_2:
        i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
        for (face = 0; face < 4; face++) {
            track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
            track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
        track->tex_dirty = true;
        DRM_ERROR("Forbidden register 0x%04X in cs at %d\n",
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
                                         struct radeon_cs_packet *pkt,
                                         struct radeon_bo *robj)
    value = radeon_get_ib_value(p, idx + 2);
    if ((value + 1) > radeon_bo_size(robj)) {
        DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
                  "(need %u have %lu) !\n",
                  radeon_bo_size(robj));
static int r100_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
    struct radeon_cs_reloc *reloc;
    struct r100_cs_track *track;
    volatile uint32_t *ib;

    track = (struct r100_cs_track *)p->track;
    switch (pkt->opcode) {
    case PACKET3_3D_LOAD_VBPNTR:
        r = r100_packet3_load_vbpntr(p, pkt, idx);
    case PACKET3_INDX_BUFFER:
        r = r100_cs_packet_next_reloc(p, &reloc);
            DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
            r100_cs_dump_packet(p, pkt);
        ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
        r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
        /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
        r = r100_cs_packet_next_reloc(p, &reloc);
            DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
            r100_cs_dump_packet(p, pkt);
        ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
        track->num_arrays = 1;
        track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));

        track->arrays[0].robj = reloc->robj;
        track->arrays[0].esize = track->vtx_size;

        track->max_indx = radeon_get_ib_value(p, idx+1);

        track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
        track->immd_dwords = pkt->count - 1;
        r = r100_cs_track_check(p->rdev, track);
    case PACKET3_3D_DRAW_IMMD:
        if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
            DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
        track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
        track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
        track->immd_dwords = pkt->count - 1;
        r = r100_cs_track_check(p->rdev, track);
    /* triggers drawing using in-packet vertex data */
    case PACKET3_3D_DRAW_IMMD_2:
        if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
            DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
        track->vap_vf_cntl = radeon_get_ib_value(p, idx);
        track->immd_dwords = pkt->count;
        r = r100_cs_track_check(p->rdev, track);
    /* triggers drawing using in-packet vertex data */
    case PACKET3_3D_DRAW_VBUF_2:
        track->vap_vf_cntl = radeon_get_ib_value(p, idx);
        r = r100_cs_track_check(p->rdev, track);
    /* triggers drawing of vertex buffers setup elsewhere */
    case PACKET3_3D_DRAW_INDX_2:
        track->vap_vf_cntl = radeon_get_ib_value(p, idx);
        r = r100_cs_track_check(p->rdev, track);
    /* triggers drawing using indices to vertex buffer */
    case PACKET3_3D_DRAW_VBUF:
        track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
        r = r100_cs_track_check(p->rdev, track);
    /* triggers drawing of vertex buffers setup elsewhere */
    case PACKET3_3D_DRAW_INDX:
        track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
        r = r100_cs_track_check(p->rdev, track);
    /* triggers drawing using indices to vertex buffer */
    case PACKET3_3D_CLEAR_HIZ:
    case PACKET3_3D_CLEAR_ZMASK:
        if (p->rdev->hyperz_filp != p->filp)
        DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
int r100_cs_parse(struct radeon_cs_parser *p)
    struct radeon_cs_packet pkt;
    struct r100_cs_track *track;

    track = malloc(sizeof(*track), DRM_MEM_DRIVER, M_ZERO | M_WAITOK);
    r100_cs_track_clear(p->rdev, track);
        r = r100_cs_packet_parse(p, &pkt, p->idx);
            free(p->track, DRM_MEM_DRIVER);
        p->idx += pkt.count + 2;
            if (p->rdev->family >= CHIP_R200)
                r = r100_cs_parse_packet0(p, &pkt,
                                          p->rdev->config.r100.reg_safe_bm,
                                          p->rdev->config.r100.reg_safe_bm_size,
                                          &r200_packet0_check);
                r = r100_cs_parse_packet0(p, &pkt,
                                          p->rdev->config.r100.reg_safe_bm,
                                          p->rdev->config.r100.reg_safe_bm_size,
                                          &r100_packet0_check);
            r = r100_packet3_check(p, &pkt);
            DRM_ERROR("Unknown packet type %d !\n",
            free(p->track, DRM_MEM_DRIVER);
            free(p->track, DRM_MEM_DRIVER);
    } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
    free(p->track, DRM_MEM_DRIVER);
2147 static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
2149 DRM_ERROR("pitch %d\n", t->pitch);
2150 DRM_ERROR("use_pitch %d\n", t->use_pitch);
2151 DRM_ERROR("width %d\n", t->width);
2152 DRM_ERROR("width_11 %d\n", t->width_11);
2153 DRM_ERROR("height %d\n", t->height);
2154 DRM_ERROR("height_11 %d\n", t->height_11);
2155 DRM_ERROR("num levels %d\n", t->num_levels);
2156 DRM_ERROR("depth %d\n", t->txdepth);
2157 DRM_ERROR("bpp %d\n", t->cpp);
2158 DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
2159 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
2160 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
2161 DRM_ERROR("compress format %d\n", t->compress_format);
2164 static int r100_track_compress_size(int compress_format, int w, int h)
2166 int block_width, block_height, block_bytes;
2167 int wblocks, hblocks;
2174 switch (compress_format) {
2175 case R100_TRACK_COMP_DXT1:
2180 case R100_TRACK_COMP_DXT35:
2186 hblocks = (h + block_height - 1) / block_height;
2187 wblocks = (w + block_width - 1) / block_width;
2188 if (wblocks < min_wblocks)
2189 wblocks = min_wblocks;
2190 sz = wblocks * hblocks * block_bytes;
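/*
 * Validate cube map faces: for each face, the face offset plus its
 * (possibly compressed) image size must fit in the backing BO. Only
 * five faces carry separate cube_info here; the remaining face is
 * presumably tracked with the base texture state.
 */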
2194 static int r100_cs_track_cube(struct radeon_device *rdev,
2195 struct r100_cs_track *track, unsigned idx)
2197 unsigned face, w, h;
2198 struct radeon_bo *cube_robj;
2200 unsigned compress_format = track->textures[idx].compress_format;
2202 for (face = 0; face < 5; face++) {
2203 cube_robj = track->textures[idx].cube_info[face].robj;
2204 w = track->textures[idx].cube_info[face].width;
2205 h = track->textures[idx].cube_info[face].height;
2207 if (compress_format) {
2208 size = r100_track_compress_size(compress_format, w, h);
2211 size *= track->textures[idx].cpp;
2213 size += track->textures[idx].cube_info[face].offset;
2215 if (size > radeon_bo_size(cube_robj)) {
2216 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
2217 size, radeon_bo_size(cube_robj));
2218 r100_cs_track_texture_print(&track->textures[idx]);
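/*
 * Validate every enabled texture unit: walk the mip chain, accumulate
 * the worst-case byte size (pitch- or width-derived, power-of-two
 * rounded where the unit requests it, times depth for 3D textures),
 * then compare the total against the bound BO's size.
 */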
2225 static int r100_cs_track_texture_check(struct radeon_device *rdev,
2226 struct r100_cs_track *track)
2228 struct radeon_bo *robj;
2230 unsigned u, i, w, h, d;
2233 for (u = 0; u < track->num_texture; u++) {
2234 if (!track->textures[u].enabled)
2236 if (track->textures[u].lookup_disable)
2238 robj = track->textures[u].robj;
2240 DRM_ERROR("No texture bound to unit %u\n", u);
2244 for (i = 0; i <= track->textures[u].num_levels; i++) {
2245 if (track->textures[u].use_pitch) {
2246 if (rdev->family < CHIP_R300)
2247 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
2249 w = track->textures[u].pitch / (1 << i);
2251 w = track->textures[u].width;
2252 if (rdev->family >= CHIP_RV515)
2253 w |= track->textures[u].width_11;
2255 if (track->textures[u].roundup_w)
2256 w = roundup_pow_of_two(w);
2258 h = track->textures[u].height;
2259 if (rdev->family >= CHIP_RV515)
2260 h |= track->textures[u].height_11;
2262 if (track->textures[u].roundup_h)
2263 h = roundup_pow_of_two(h);
2264 if (track->textures[u].tex_coord_type == 1) {
2265 d = (1 << track->textures[u].txdepth) / (1 << i);
2271 if (track->textures[u].compress_format) {
2273 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
2274 /* compressed textures are block based */
2278 size *= track->textures[u].cpp;
2280 switch (track->textures[u].tex_coord_type) {
2285 if (track->separate_cube) {
2286 ret = r100_cs_track_cube(rdev, track, u);
2293 DRM_ERROR("Invalid texture coordinate type %u for unit "
2294 "%u\n", track->textures[u].tex_coord_type, u);
2297 if (size > radeon_bo_size(robj)) {
2298 DRM_ERROR("Texture of unit %u needs %lu bytes but is "
2299 "%lu\n", u, size, radeon_bo_size(robj));
2300 r100_cs_track_texture_print(&track->textures[u]);
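/*
 * Validate the accumulated CS state before a draw: color buffers, the
 * z buffer, the AA resolve buffer and the vertex arrays are each
 * checked against their bound BOs, with the dirty flags used to skip
 * state unchanged since the last check. PRIM_WALK (VAP_VF_CNTL bits
 * 4-5) selects how vertex data is fetched and therefore which size
 * check applies.
 */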
2307 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2313 unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
2315 if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
2316 !track->blend_read_enable)
2319 for (i = 0; i < num_cb; i++) {
2320 if (track->cb[i].robj == NULL) {
2321 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
2324 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
2325 size += track->cb[i].offset;
2326 if (size > radeon_bo_size(track->cb[i].robj)) {
2327 DRM_ERROR("[drm] Buffer too small for color buffer %d "
2328 "(need %lu have %lu) !\n", i, size,
2329 radeon_bo_size(track->cb[i].robj));
2330 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
2331 i, track->cb[i].pitch, track->cb[i].cpp,
2332 track->cb[i].offset, track->maxy);
2336 track->cb_dirty = false;
2338 if (track->zb_dirty && track->z_enabled) {
2339 if (track->zb.robj == NULL) {
2340 DRM_ERROR("[drm] No buffer for z buffer !\n");
2343 size = track->zb.pitch * track->zb.cpp * track->maxy;
2344 size += track->zb.offset;
2345 if (size > radeon_bo_size(track->zb.robj)) {
2346 DRM_ERROR("[drm] Buffer too small for z buffer "
2347 "(need %lu have %lu) !\n", size,
2348 radeon_bo_size(track->zb.robj));
2349 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
2350 track->zb.pitch, track->zb.cpp,
2351 track->zb.offset, track->maxy);
2355 track->zb_dirty = false;
2357 if (track->aa_dirty && track->aaresolve) {
2358 if (track->aa.robj == NULL) {
2359 DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
2362 /* I believe the format comes from colorbuffer0. */
2363 size = track->aa.pitch * track->cb[0].cpp * track->maxy;
2364 size += track->aa.offset;
2365 if (size > radeon_bo_size(track->aa.robj)) {
2366 DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
2367 "(need %lu have %lu) !\n", i, size,
2368 radeon_bo_size(track->aa.robj));
2369 DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
2370 i, track->aa.pitch, track->cb[0].cpp,
2371 track->aa.offset, track->maxy);
2375 track->aa_dirty = false;
2377 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
2378 if (track->vap_vf_cntl & (1 << 14)) {
2379 nverts = track->vap_alt_nverts;
2381 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
2383 switch (prim_walk) {
2385 for (i = 0; i < track->num_arrays; i++) {
2386 size = track->arrays[i].esize * track->max_indx * 4;
2387 if (track->arrays[i].robj == NULL) {
2388 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2389 "bound\n", prim_walk, i);
2392 if (size > radeon_bo_size(track->arrays[i].robj)) {
2393 dev_err(rdev->dev, "(PW %u) Vertex array %u "
2394 "need %lu dwords have %lu dwords\n",
2395 prim_walk, i, size >> 2,
2396 radeon_bo_size(track->arrays[i].robj)
2398 DRM_ERROR("Max indices %u\n", track->max_indx);
2404 for (i = 0; i < track->num_arrays; i++) {
2405 size = track->arrays[i].esize * (nverts - 1) * 4;
2406 if (track->arrays[i].robj == NULL) {
2407 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2408 "bound\n", prim_walk, i);
2411 if (size > radeon_bo_size(track->arrays[i].robj)) {
2412 dev_err(rdev->dev, "(PW %u) Vertex array %u "
2413 "need %lu dwords have %lu dwords\n",
2414 prim_walk, i, size >> 2,
2415 radeon_bo_size(track->arrays[i].robj)
2422 size = track->vtx_size * nverts;
2423 if (size != track->immd_dwords) {
2424 DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
2425 track->immd_dwords, size);
2426 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
2427 nverts, track->vtx_size);
2432 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
2437 if (track->tex_dirty) {
2438 track->tex_dirty = false;
2439 return r100_cs_track_texture_check(rdev, track);
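/*
 * Reset CS tracking to pessimistic defaults (maximum pitch and cpp,
 * huge immd_dwords and max_indx, all texture units disabled) so that
 * a CS which draws without first programming the corresponding state
 * trips the size checks instead of slipping through.
 */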
2444 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
2448 track->cb_dirty = true;
2449 track->zb_dirty = true;
2450 track->tex_dirty = true;
2451 track->aa_dirty = true;
2453 if (rdev->family < CHIP_R300) {
2455 if (rdev->family <= CHIP_RS200)
2456 track->num_texture = 3;
2458 track->num_texture = 6;
2460 track->separate_cube = 1;
2463 track->num_texture = 16;
2465 track->separate_cube = 0;
2466 track->aaresolve = false;
2467 track->aa.robj = NULL;
2470 for (i = 0; i < track->num_cb; i++) {
2471 track->cb[i].robj = NULL;
2472 track->cb[i].pitch = 8192;
2473 track->cb[i].cpp = 16;
2474 track->cb[i].offset = 0;
2476 track->z_enabled = true;
2477 track->zb.robj = NULL;
2478 track->zb.pitch = 8192;
2480 track->zb.offset = 0;
2481 track->vtx_size = 0x7F;
2482 track->immd_dwords = 0xFFFFFFFFUL;
2483 track->num_arrays = 11;
2484 track->max_indx = 0x00FFFFFFUL;
2485 for (i = 0; i < track->num_arrays; i++) {
2486 track->arrays[i].robj = NULL;
2487 track->arrays[i].esize = 0x7F;
2489 for (i = 0; i < track->num_texture; i++) {
2490 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2491 track->textures[i].pitch = 16536;
2492 track->textures[i].width = 16536;
2493 track->textures[i].height = 16536;
2494 track->textures[i].width_11 = 1 << 11;
2495 track->textures[i].height_11 = 1 << 11;
2496 track->textures[i].num_levels = 12;
2497 if (rdev->family <= CHIP_RS200) {
2498 track->textures[i].tex_coord_type = 0;
2499 track->textures[i].txdepth = 0;
2501 track->textures[i].txdepth = 16;
2502 track->textures[i].tex_coord_type = 1;
2504 track->textures[i].cpp = 64;
2505 track->textures[i].robj = NULL;
2506 /* CS IB emission code makes sure texture units are disabled */
2507 track->textures[i].enabled = false;
2508 track->textures[i].lookup_disable = false;
2509 track->textures[i].roundup_w = true;
2510 track->textures[i].roundup_h = true;
2511 if (track->separate_cube)
2512 for (face = 0; face < 5; face++) {
2513 track->textures[i].cube_info[face].robj = NULL;
2514 track->textures[i].cube_info[face].width = 16536;
2515 track->textures[i].cube_info[face].height = 16536;
2516 track->textures[i].cube_info[face].offset = 0;
2522 * Global GPU functions
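/*
 * r100_errata - set per-family PLL errata flags: RV200/RS200 need
 * dummy reads after a CLOCK_CNTL_INDEX access, and RV100/RS100/RS200
 * need a delay after CLOCK_CNTL_DATA accesses. The flags set here are
 * consumed by the PLL accessors further below.
 */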
2524 static void r100_errata(struct radeon_device *rdev)
2526 rdev->pll_errata = 0;
2528 if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
2529 rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
2532 if (rdev->family == CHIP_RV100 ||
2533 rdev->family == CHIP_RS100 ||
2534 rdev->family == CHIP_RS200) {
2535 rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
2539 static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
2544 for (i = 0; i < rdev->usec_timeout; i++) {
2545 tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
2554 int r100_gui_wait_for_idle(struct radeon_device *rdev)
2559 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
2560 DRM_ERROR("radeon: wait for empty RBBM fifo failed !"
2561 " Bad things might happen.\n");
2563 for (i = 0; i < rdev->usec_timeout; i++) {
2564 tmp = RREG32(RADEON_RBBM_STATUS);
2565 if (!(tmp & RADEON_RBBM_ACTIVE)) {
2573 int r100_mc_wait_for_idle(struct radeon_device *rdev)
2578 for (i = 0; i < rdev->usec_timeout; i++) {
2579 /* read MC_STATUS */
2580 tmp = RREG32(RADEON_MC_STATUS);
2581 if (tmp & RADEON_MC_IDLE) {
2589 bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2593 rbbm_status = RREG32(R_000E40_RBBM_STATUS);
2594 if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
2595 radeon_ring_lockup_update(ring);
2598 /* force CP activities */
2599 radeon_ring_force_activity(rdev, ring);
2600 return radeon_ring_test_lockup(rdev, ring);
2603 /* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
2604 void r100_enable_bm(struct radeon_device *rdev)
2607 /* Enable bus mastering */
2608 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
2609 WREG32(RADEON_BUS_CNTL, tmp);
2612 void r100_bm_disable(struct radeon_device *rdev)
2616 /* disable bus mastering */
2617 tmp = RREG32(R_000030_BUS_CNTL);
2618 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
2620 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
2622 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
2623 tmp = RREG32(RADEON_BUS_CNTL);
2625 pci_disable_busmaster(rdev->dev);
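/*
 * Soft reset sequence: stop the MC and CP, save PCI state and disable
 * bus mastering, soft-reset the engine blocks (SE/RE/PP/RB) and then
 * the CP, restore PCI state, and verify via RBBM_STATUS that the
 * blocks actually went idle before resuming the MC.
 */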
2629 int r100_asic_reset(struct radeon_device *rdev)
2631 struct r100_mc_save save;
2635 status = RREG32(R_000E40_RBBM_STATUS);
2636 if (!G_000E40_GUI_ACTIVE(status)) {
2639 r100_mc_stop(rdev, &save);
2640 status = RREG32(R_000E40_RBBM_STATUS);
2641 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2643 WREG32(RADEON_CP_CSQ_CNTL, 0);
2644 tmp = RREG32(RADEON_CP_RB_CNTL);
2645 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
2646 WREG32(RADEON_CP_RB_RPTR_WR, 0);
2647 WREG32(RADEON_CP_RB_WPTR, 0);
2648 WREG32(RADEON_CP_RB_CNTL, tmp);
2649 /* save PCI state */
2650 pci_save_state(device_get_parent(rdev->dev));
2651 /* disable bus mastering */
2652 r100_bm_disable(rdev);
2653 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
2654 S_0000F0_SOFT_RESET_RE(1) |
2655 S_0000F0_SOFT_RESET_PP(1) |
2656 S_0000F0_SOFT_RESET_RB(1));
2657 RREG32(R_0000F0_RBBM_SOFT_RESET);
2659 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2661 status = RREG32(R_000E40_RBBM_STATUS);
2662 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2664 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
2665 RREG32(R_0000F0_RBBM_SOFT_RESET);
2667 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2669 status = RREG32(R_000E40_RBBM_STATUS);
2670 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2671 /* restore PCI & busmastering */
2672 pci_restore_state(device_get_parent(rdev->dev));
2673 r100_enable_bm(rdev);
2674 /* Check if GPU is idle */
2675 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
2676 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
2677 dev_err(rdev->dev, "failed to reset GPU\n");
2680 dev_info(rdev->dev, "GPU reset succeed\n");
2681 r100_mc_resume(rdev, &save);
2685 void r100_set_common_regs(struct radeon_device *rdev)
2687 struct drm_device *dev = rdev->ddev;
2688 bool force_dac2 = false;
2691 /* set these so they don't interfere with anything */
2692 WREG32(RADEON_OV0_SCALE_CNTL, 0);
2693 WREG32(RADEON_SUBPIC_CNTL, 0);
2694 WREG32(RADEON_VIPH_CONTROL, 0);
2695 WREG32(RADEON_I2C_CNTL_1, 0);
2696 WREG32(RADEON_DVI_I2C_CNTL_1, 0);
2697 WREG32(RADEON_CAP0_TRIG_CNTL, 0);
2698 WREG32(RADEON_CAP1_TRIG_CNTL, 0);
2700 /* always set up dac2 on rn50 and some rv100 as lots
2701 * of servers seem to wire it up to a VGA port but
2702 * don't report it in the bios connector
2705 switch (dev->pci_device) {
2714 /* DELL triple head servers */
2715 if ((dev->pci_subvendor == 0x1028 /* DELL */) &&
2716 ((dev->pci_subdevice == 0x016c) ||
2717 (dev->pci_subdevice == 0x016d) ||
2718 (dev->pci_subdevice == 0x016e) ||
2719 (dev->pci_subdevice == 0x016f) ||
2720 (dev->pci_subdevice == 0x0170) ||
2721 (dev->pci_subdevice == 0x017d) ||
2722 (dev->pci_subdevice == 0x017e) ||
2723 (dev->pci_subdevice == 0x0183) ||
2724 (dev->pci_subdevice == 0x018a) ||
2725 (dev->pci_subdevice == 0x019a)))
2731 u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
2732 u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
2733 u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
2735 /* For CRT on DAC2, don't turn it on if BIOS didn't
2736 enable it, even if it's detected.
2739 /* force it to crtc0 */
2740 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
2741 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
2742 disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
2744 /* set up the TV DAC */
2745 tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
2746 RADEON_TV_DAC_STD_MASK |
2747 RADEON_TV_DAC_RDACPD |
2748 RADEON_TV_DAC_GDACPD |
2749 RADEON_TV_DAC_BDACPD |
2750 RADEON_TV_DAC_BGADJ_MASK |
2751 RADEON_TV_DAC_DACADJ_MASK);
2752 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
2753 RADEON_TV_DAC_NHOLD |
2754 RADEON_TV_DAC_STD_PS2 |
2757 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
2758 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
2759 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
2762 /* switch PM block to ACPI mode */
2763 tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
2764 tmp &= ~RADEON_PM_MODE_SEL;
2765 WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
2772 static void r100_vram_get_type(struct radeon_device *rdev)
2776 rdev->mc.vram_is_ddr = false;
2777 if (rdev->flags & RADEON_IS_IGP)
2778 rdev->mc.vram_is_ddr = true;
2779 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
2780 rdev->mc.vram_is_ddr = true;
2781 if ((rdev->family == CHIP_RV100) ||
2782 (rdev->family == CHIP_RS100) ||
2783 (rdev->family == CHIP_RS200)) {
2784 tmp = RREG32(RADEON_MEM_CNTL);
2785 if (tmp & RV100_HALF_MODE) {
2786 rdev->mc.vram_width = 32;
2788 rdev->mc.vram_width = 64;
2790 if (rdev->flags & RADEON_SINGLE_CRTC) {
2791 rdev->mc.vram_width /= 4;
2792 rdev->mc.vram_is_ddr = true;
2794 } else if (rdev->family <= CHIP_RV280) {
2795 tmp = RREG32(RADEON_MEM_CNTL);
2796 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
2797 rdev->mc.vram_width = 128;
2799 rdev->mc.vram_width = 64;
2803 rdev->mc.vram_width = 128;
2807 static u32 r100_get_accessible_vram(struct radeon_device *rdev)
2812 aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2814 /* Set HDP_APER_CNTL only on cards that are known not to be broken,
2815 * that is, have the 2nd generation multifunction PCI interface
2817 if (rdev->family == CHIP_RV280 ||
2818 rdev->family >= CHIP_RV350) {
2819 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
2820 ~RADEON_HDP_APER_CNTL);
2821 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
2822 return aper_size * 2;
2825 /* Older cards have all sorts of funny issues to deal with. First
2826 * check if it's a multifunction card by reading the PCI config
2827 * header type... Limit those to one aperture size
2829 byte = pci_read_config(rdev->dev, 0xe, 1);
2831 DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
2832 DRM_INFO("Limiting VRAM to one aperture\n");
2836 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS
2837 * has set it up. We don't write this as it's broken on some ASICs but
2838 * we expect the BIOS to have done the right thing (might be too optimistic...)
2840 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
2841 return aper_size * 2;
2845 void r100_vram_init_sizes(struct radeon_device *rdev)
2847 u64 config_aper_size;
2849 /* work out accessible VRAM */
2850 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
2851 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
2852 rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
2853 /* FIXME we don't use the second aperture yet when we could use it */
2854 if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
2855 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2856 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2857 if (rdev->flags & RADEON_IS_IGP) {
2859 /* read NB_TOM to get the amount of ram stolen for the GPU */
2860 tom = RREG32(RADEON_NB_TOM);
2861 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
2862 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2863 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2865 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
2866 /* Some production boards of m6 will report 0
2869 if (rdev->mc.real_vram_size == 0) {
2870 rdev->mc.real_vram_size = 8192 * 1024;
2871 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2873 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
2874 * Novell bug 204882 + along with lots of ubuntu ones
2876 if (rdev->mc.aper_size > config_aper_size)
2877 config_aper_size = rdev->mc.aper_size;
2879 if (config_aper_size > rdev->mc.real_vram_size)
2880 rdev->mc.mc_vram_size = config_aper_size;
2882 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2886 void r100_vga_set_state(struct radeon_device *rdev, bool state)
2890 temp = RREG32(RADEON_CONFIG_CNTL);
2891 if (state == false) {
2892 temp &= ~RADEON_CFG_VGA_RAM_EN;
2893 temp |= RADEON_CFG_VGA_IO_DIS;
2895 temp &= ~RADEON_CFG_VGA_IO_DIS;
2897 WREG32(RADEON_CONFIG_CNTL, temp);
2900 static void r100_mc_init(struct radeon_device *rdev)
2904 r100_vram_get_type(rdev);
2905 r100_vram_init_sizes(rdev);
2906 base = rdev->mc.aper_base;
2907 if (rdev->flags & RADEON_IS_IGP)
2908 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
2909 radeon_vram_location(rdev, &rdev->mc, base);
2910 rdev->mc.gtt_base_align = 0;
2911 if (!(rdev->flags & RADEON_IS_AGP))
2912 radeon_gtt_location(rdev, &rdev->mc);
2913 radeon_update_bandwidth_info(rdev);
2918 * Indirect registers accessor
2920 void r100_pll_errata_after_index(struct radeon_device *rdev)
2922 if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
2923 (void)RREG32(RADEON_CLOCK_CNTL_DATA);
2924 (void)RREG32(RADEON_CRTC_GEN_CNTL);
2928 static void r100_pll_errata_after_data(struct radeon_device *rdev)
2930 /* This workaround is necessary on RV100, RS100 and RS200 chips
2931 * or the chip could hang on a subsequent access
2933 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
2937 /* This function is required to work around a hardware bug in some (all?)
2938 * revisions of the R300. This workaround should be called after every
2939 * CLOCK_CNTL_INDEX register access. If not, register reads afterward
2940 * may not be correct.
2942 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
2945 save = RREG32(RADEON_CLOCK_CNTL_INDEX);
2946 tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
2947 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
2948 tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
2949 WREG32(RADEON_CLOCK_CNTL_INDEX, save);
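/*
 * PLL registers are reached indirectly: write the 6-bit register
 * index to CLOCK_CNTL_INDEX (with PLL_WR_EN set for writes) and move
 * the value through CLOCK_CNTL_DATA, applying the errata helpers
 * above around each step.
 */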
2953 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
2957 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
2958 r100_pll_errata_after_index(rdev);
2959 data = RREG32(RADEON_CLOCK_CNTL_DATA);
2960 r100_pll_errata_after_data(rdev);
2964 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
2966 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
2967 r100_pll_errata_after_index(rdev);
2968 WREG32(RADEON_CLOCK_CNTL_DATA, v);
2969 r100_pll_errata_after_data(rdev);
2972 static void r100_set_safe_registers(struct radeon_device *rdev)
2974 if (ASIC_IS_RN50(rdev)) {
2975 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
2976 rdev->config.r100.reg_safe_bm_size = DRM_ARRAY_SIZE(rn50_reg_safe_bm);
2977 } else if (rdev->family < CHIP_R200) {
2978 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
2979 rdev->config.r100.reg_safe_bm_size = DRM_ARRAY_SIZE(r100_reg_safe_bm);
2981 r200_set_safe_registers(rdev);
2988 #if defined(CONFIG_DEBUG_FS)
2989 static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
2991 struct drm_info_node *node = (struct drm_info_node *) m->private;
2992 struct drm_device *dev = node->minor->dev;
2993 struct radeon_device *rdev = dev->dev_private;
2994 uint32_t reg, value;
2997 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
2998 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
2999 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
3000 for (i = 0; i < 64; i++) {
3001 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
3002 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
3003 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
3004 value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
3005 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
3010 static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
3012 struct drm_info_node *node = (struct drm_info_node *) m->private;
3013 struct drm_device *dev = node->minor->dev;
3014 struct radeon_device *rdev = dev->dev_private;
3015 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3017 unsigned count, i, j;
3019 radeon_ring_free_size(rdev, ring);
3020 rdp = RREG32(RADEON_CP_RB_RPTR);
3021 wdp = RREG32(RADEON_CP_RB_WPTR);
3022 count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
3023 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
3024 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
3025 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
3026 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
3027 seq_printf(m, "%u dwords in ring\n", count);
3028 for (j = 0; j <= count; j++) {
3029 i = (rdp + j) & ring->ptr_mask;
3030 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
3036 static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
3038 struct drm_info_node *node = (struct drm_info_node *) m->private;
3039 struct drm_device *dev = node->minor->dev;
3040 struct radeon_device *rdev = dev->dev_private;
3041 uint32_t csq_stat, csq2_stat, tmp;
3042 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
3045 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
3046 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
3047 csq_stat = RREG32(RADEON_CP_CSQ_STAT);
3048 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
3049 r_rptr = (csq_stat >> 0) & 0x3ff;
3050 r_wptr = (csq_stat >> 10) & 0x3ff;
3051 ib1_rptr = (csq_stat >> 20) & 0x3ff;
3052 ib1_wptr = (csq2_stat >> 0) & 0x3ff;
3053 ib2_rptr = (csq2_stat >> 10) & 0x3ff;
3054 ib2_wptr = (csq2_stat >> 20) & 0x3ff;
3055 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
3056 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
3057 seq_printf(m, "Ring rptr %u\n", r_rptr);
3058 seq_printf(m, "Ring wptr %u\n", r_wptr);
3059 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
3060 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
3061 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
3062 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
3063 /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms
3064 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
3065 seq_printf(m, "Ring fifo:\n");
3066 for (i = 0; i < 256; i++) {
3067 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
3068 tmp = RREG32(RADEON_CP_CSQ_DATA);
3069 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
3071 seq_printf(m, "Indirect1 fifo:\n");
3072 for (i = 256; i <= 512; i++) {
3073 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
3074 tmp = RREG32(RADEON_CP_CSQ_DATA);
3075 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
3077 seq_printf(m, "Indirect2 fifo:\n");
3078 for (i = 640; i < ib1_wptr; i++) {
3079 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
3080 tmp = RREG32(RADEON_CP_CSQ_DATA);
3081 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
3086 static int r100_debugfs_mc_info(struct seq_file *m, void *data)
3088 struct drm_info_node *node = (struct drm_info_node *) m->private;
3089 struct drm_device *dev = node->minor->dev;
3090 struct radeon_device *rdev = dev->dev_private;
3093 tmp = RREG32(RADEON_CONFIG_MEMSIZE);
3094 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
3095 tmp = RREG32(RADEON_MC_FB_LOCATION);
3096 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
3097 tmp = RREG32(RADEON_BUS_CNTL);
3098 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
3099 tmp = RREG32(RADEON_MC_AGP_LOCATION);
3100 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
3101 tmp = RREG32(RADEON_AGP_BASE);
3102 seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
3103 tmp = RREG32(RADEON_HOST_PATH_CNTL);
3104 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
3105 tmp = RREG32(0x01D0);
3106 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
3107 tmp = RREG32(RADEON_AIC_LO_ADDR);
3108 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
3109 tmp = RREG32(RADEON_AIC_HI_ADDR);
3110 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
3111 tmp = RREG32(0x01E4);
3112 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
3116 static struct drm_info_list r100_debugfs_rbbm_list[] = {
3117 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
3120 static struct drm_info_list r100_debugfs_cp_list[] = {
3121 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
3122 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
3125 static struct drm_info_list r100_debugfs_mc_info_list[] = {
3126 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
3130 int r100_debugfs_rbbm_init(struct radeon_device *rdev)
3132 #if defined(CONFIG_DEBUG_FS)
3133 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
3139 int r100_debugfs_cp_init(struct radeon_device *rdev)
3141 #if defined(CONFIG_DEBUG_FS)
3142 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
3148 int r100_debugfs_mc_info_init(struct radeon_device *rdev)
3150 #if defined(CONFIG_DEBUG_FS)
3151 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
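/*
 * Program one surface register slot: translate the generic
 * RADEON_TILING_* flags into the per-family SURF_TILE encoding, fold
 * in the pitch (scaled down on pre-R300 parts and for some swapped
 * RN50 cases), and write the INFO/LOWER_BOUND/UPPER_BOUND triplet.
 */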
3157 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
3158 uint32_t tiling_flags, uint32_t pitch,
3159 uint32_t offset, uint32_t obj_size)
3161 int surf_index = reg * 16;
3164 if (rdev->family <= CHIP_RS200) {
3165 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
3166 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
3167 flags |= RADEON_SURF_TILE_COLOR_BOTH;
3168 if (tiling_flags & RADEON_TILING_MACRO)
3169 flags |= RADEON_SURF_TILE_COLOR_MACRO;
3170 } else if (rdev->family <= CHIP_RV280) {
3171 if (tiling_flags & (RADEON_TILING_MACRO))
3172 flags |= R200_SURF_TILE_COLOR_MACRO;
3173 if (tiling_flags & RADEON_TILING_MICRO)
3174 flags |= R200_SURF_TILE_COLOR_MICRO;
3176 if (tiling_flags & RADEON_TILING_MACRO)
3177 flags |= R300_SURF_TILE_MACRO;
3178 if (tiling_flags & RADEON_TILING_MICRO)
3179 flags |= R300_SURF_TILE_MICRO;
3182 if (tiling_flags & RADEON_TILING_SWAP_16BIT)
3183 flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
3184 if (tiling_flags & RADEON_TILING_SWAP_32BIT)
3185 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
3187 /* when we aren't tiling, the pitch seems to need to be further divided down. - tested on power5 + rn50 server */
3188 if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) {
3189 if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO)))
3190 if (ASIC_IS_RN50(rdev))
3194 /* r100/r200 divide by 16 */
3195 if (rdev->family < CHIP_R300)
3196 flags |= pitch / 16;
3201 DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
3202 WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
3203 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
3204 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
3208 void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
3210 int surf_index = reg * 16;
3211 WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
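/*
 * Legacy display watermark programming: derive memory latency from
 * the DRAM timing fields (tRCD, tRP, tRAS, CAS), compare peak display
 * bandwidth against effective memory bandwidth, and program the
 * GRPH_BUFFER_CNTL stop-request and critical-point fields for each
 * active CRTC. All arithmetic is done in 20.12 fixed point via the
 * dfixed_* helpers.
 */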
3214 void r100_bandwidth_update(struct radeon_device *rdev)
3216 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
3217 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
3218 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
3219 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
3220 fixed20_12 memtcas_ff[8] = {
3225 dfixed_init_half(1),
3226 dfixed_init_half(2),
3229 fixed20_12 memtcas_rs480_ff[8] = {
3235 dfixed_init_half(1),
3236 dfixed_init_half(2),
3237 dfixed_init_half(3),
3239 fixed20_12 memtcas2_ff[8] = {
3249 fixed20_12 memtrbs[8] = {
3251 dfixed_init_half(1),
3253 dfixed_init_half(2),
3255 dfixed_init_half(3),
3259 fixed20_12 memtrbs_r4xx[8] = {
3269 fixed20_12 min_mem_eff;
3270 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
3271 fixed20_12 cur_latency_mclk, cur_latency_sclk;
3272 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
3273 disp_drain_rate2, read_return_rate;
3274 fixed20_12 time_disp1_drop_priority;
3276 int cur_size = 16; /* in octawords */
3277 int critical_point = 0, critical_point2;
3278 /* uint32_t read_return_rate, time_disp1_drop_priority; */
3279 int stop_req, max_stop_req;
3280 struct drm_display_mode *mode1 = NULL;
3281 struct drm_display_mode *mode2 = NULL;
3282 uint32_t pixel_bytes1 = 0;
3283 uint32_t pixel_bytes2 = 0;
3285 radeon_update_display_priority(rdev);
3287 if (rdev->mode_info.crtcs[0]->base.enabled) {
3288 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
3289 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
3291 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3292 if (rdev->mode_info.crtcs[1]->base.enabled) {
3293 mode2 = &rdev->mode_info.crtcs[1]->base.mode;
3294 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
3298 min_mem_eff.full = dfixed_const_8(0);
3300 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
3301 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
3302 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
3303 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
3304 /* check crtc enables */
3306 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
3308 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
3309 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
3313 * determine if there is enough bw for the current mode
3315 sclk_ff = rdev->pm.sclk;
3316 mclk_ff = rdev->pm.mclk;
3318 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
3319 temp_ff.full = dfixed_const(temp);
3320 mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
3324 peak_disp_bw.full = 0;
3326 temp_ff.full = dfixed_const(1000);
3327 pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
3328 pix_clk.full = dfixed_div(pix_clk, temp_ff);
3329 temp_ff.full = dfixed_const(pixel_bytes1);
3330 peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
3333 temp_ff.full = dfixed_const(1000);
3334 pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
3335 pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
3336 temp_ff.full = dfixed_const(pixel_bytes2);
3337 peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
3340 mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
3341 if (peak_disp_bw.full >= mem_bw.full) {
3342 DRM_ERROR("You may not have enough display bandwidth for current mode\n"
3343 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
3346 /* Get values from the EXT_MEM_CNTL register...converting its contents. */
3347 temp = RREG32(RADEON_MEM_TIMING_CNTL);
3348 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
3349 mem_trcd = ((temp >> 2) & 0x3) + 1;
3350 mem_trp = ((temp & 0x3)) + 1;
3351 mem_tras = ((temp & 0x70) >> 4) + 1;
3352 } else if (rdev->family == CHIP_R300 ||
3353 rdev->family == CHIP_R350) { /* r300, r350 */
3354 mem_trcd = (temp & 0x7) + 1;
3355 mem_trp = ((temp >> 8) & 0x7) + 1;
3356 mem_tras = ((temp >> 11) & 0xf) + 4;
3357 } else if (rdev->family == CHIP_RV350 ||
3358 rdev->family <= CHIP_RV380) {
3360 mem_trcd = (temp & 0x7) + 3;
3361 mem_trp = ((temp >> 8) & 0x7) + 3;
3362 mem_tras = ((temp >> 11) & 0xf) + 6;
3363 } else if (rdev->family == CHIP_R420 ||
3364 rdev->family == CHIP_R423 ||
3365 rdev->family == CHIP_RV410) {
3367 mem_trcd = (temp & 0xf) + 3;
3370 mem_trp = ((temp >> 8) & 0xf) + 3;
3373 mem_tras = ((temp >> 12) & 0x1f) + 6;
3376 } else { /* RV200, R200 */
3377 mem_trcd = (temp & 0x7) + 1;
3378 mem_trp = ((temp >> 8) & 0x7) + 1;
3379 mem_tras = ((temp >> 12) & 0xf) + 4;
3382 trcd_ff.full = dfixed_const(mem_trcd);
3383 trp_ff.full = dfixed_const(mem_trp);
3384 tras_ff.full = dfixed_const(mem_tras);
3386 /* Get values from the MEM_SDRAM_MODE_REG register...converting its */
3387 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
3388 data = (temp & (7 << 20)) >> 20;
3389 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
3390 if (rdev->family == CHIP_RS480) /* don't think rs400 */
3391 tcas_ff = memtcas_rs480_ff[data];
3393 tcas_ff = memtcas_ff[data];
3395 tcas_ff = memtcas2_ff[data];
3397 if (rdev->family == CHIP_RS400 ||
3398 rdev->family == CHIP_RS480) {
3399 /* extra cas latency stored in bits 23-25 0-4 clocks */
3400 data = (temp >> 23) & 0x7;
3402 tcas_ff.full += dfixed_const(data);
3405 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
3406 /* on the R300, Tcas is included in Trbs.
3408 temp = RREG32(RADEON_MEM_CNTL);
3409 data = (R300_MEM_NUM_CHANNELS_MASK & temp);
3411 if (R300_MEM_USE_CD_CH_ONLY & temp) {
3412 temp = RREG32(R300_MC_IND_INDEX);
3413 temp &= ~R300_MC_IND_ADDR_MASK;
3414 temp |= R300_MC_READ_CNTL_CD_mcind;
3415 WREG32(R300_MC_IND_INDEX, temp);
3416 temp = RREG32(R300_MC_IND_DATA);
3417 data = (R300_MEM_RBS_POSITION_C_MASK & temp);
3419 temp = RREG32(R300_MC_READ_CNTL_AB);
3420 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
3423 temp = RREG32(R300_MC_READ_CNTL_AB);
3424 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
3426 if (rdev->family == CHIP_RV410 ||
3427 rdev->family == CHIP_R420 ||
3428 rdev->family == CHIP_R423)
3429 trbs_ff = memtrbs_r4xx[data];
3431 trbs_ff = memtrbs[data];
3432 tcas_ff.full += trbs_ff.full;
3435 sclk_eff_ff.full = sclk_ff.full;
3437 if (rdev->flags & RADEON_IS_AGP) {
3438 fixed20_12 agpmode_ff;
3439 agpmode_ff.full = dfixed_const(radeon_agpmode);
3440 temp_ff.full = dfixed_const_666(16);
3441 sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
3443 /* TODO PCIE lanes may affect this - agpmode == 16?? */
3445 if (ASIC_IS_R300(rdev)) {
3446 sclk_delay_ff.full = dfixed_const(250);
3448 if ((rdev->family == CHIP_RV100) ||
3449 rdev->flags & RADEON_IS_IGP) {
3450 if (rdev->mc.vram_is_ddr)
3451 sclk_delay_ff.full = dfixed_const(41);
3453 sclk_delay_ff.full = dfixed_const(33);
3455 if (rdev->mc.vram_width == 128)
3456 sclk_delay_ff.full = dfixed_const(57);
3458 sclk_delay_ff.full = dfixed_const(41);
3462 mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
3464 if (rdev->mc.vram_is_ddr) {
3465 if (rdev->mc.vram_width == 32) {
3466 k1.full = dfixed_const(40);
3469 k1.full = dfixed_const(20);
3473 k1.full = dfixed_const(40);
3477 temp_ff.full = dfixed_const(2);
3478 mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
3479 temp_ff.full = dfixed_const(c);
3480 mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
3481 temp_ff.full = dfixed_const(4);
3482 mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
3483 mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
3484 mc_latency_mclk.full += k1.full;
3486 mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
3487 mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
3490 HW cursor time assuming worst case of full size colour cursor.
3492 temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
3493 temp_ff.full += trcd_ff.full;
3494 if (temp_ff.full < tras_ff.full)
3495 temp_ff.full = tras_ff.full;
3496 cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
3498 temp_ff.full = dfixed_const(cur_size);
3499 cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
3501 Find the total latency for the display data.
3503 disp_latency_overhead.full = dfixed_const(8);
3504 disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
3505 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
3506 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
3508 if (mc_latency_mclk.full > mc_latency_sclk.full)
3509 disp_latency.full = mc_latency_mclk.full;
3511 disp_latency.full = mc_latency_sclk.full;
3513 /* setup Max GRPH_STOP_REQ default value */
3514 if (ASIC_IS_RV100(rdev))
3515 max_stop_req = 0x5c;
3517 max_stop_req = 0x7c;
3521 Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
3522 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
3524 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
3526 if (stop_req > max_stop_req)
3527 stop_req = max_stop_req;
3530 Find the drain rate of the display buffer.
3532 temp_ff.full = dfixed_const((16/pixel_bytes1));
3533 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
3536 Find the critical point of the display buffer.
3538 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
3539 crit_point_ff.full += dfixed_const_half(0);
3541 critical_point = dfixed_trunc(crit_point_ff);
3543 if (rdev->disp_priority == 2) {
3548 The critical point should never be above max_stop_req-4. Setting
3549 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
3551 if (max_stop_req - critical_point < 4)
3554 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
3555 /* some R300 cards have a problem with this set to 0, when CRTC2 is enabled. */
3556 critical_point = 0x10;
3559 temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
3560 temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
3561 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3562 temp &= ~(RADEON_GRPH_START_REQ_MASK);
3563 if ((rdev->family == CHIP_R350) &&
3564 (stop_req > 0x15)) {
3567 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3568 temp |= RADEON_GRPH_BUFFER_SIZE;
3569 temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
3570 RADEON_GRPH_CRITICAL_AT_SOF |
3571 RADEON_GRPH_STOP_CNTL);
3573 Write the result into the register.
3575 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3576 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3579 if ((rdev->family == CHIP_RS400) ||
3580 (rdev->family == CHIP_RS480)) {
3581 /* attempt to program RS400 disp regs correctly ??? */
3582 temp = RREG32(RS400_DISP1_REG_CNTL);
3583 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
3584 RS400_DISP1_STOP_REQ_LEVEL_MASK);
3585 WREG32(RS400_DISP1_REQ_CNTL1, (temp |
3586 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3587 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3588 temp = RREG32(RS400_DMIF_MEM_CNTL1);
3589 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
3590 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
3591 WREG32(RS400_DMIF_MEM_CNTL1, (temp |
3592 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
3593 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
3597 DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n",
3598 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
3599 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
3604 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
3606 if (stop_req > max_stop_req)
3607 stop_req = max_stop_req;
3610 Find the drain rate of the display buffer.
3612 temp_ff.full = dfixed_const((16/pixel_bytes2));
3613 disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
3615 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
3616 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
3617 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3618 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
3619 if ((rdev->family == CHIP_R350) &&
3620 (stop_req > 0x15)) {
3623 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3624 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
3625 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
3626 RADEON_GRPH_CRITICAL_AT_SOF |
3627 RADEON_GRPH_STOP_CNTL);
3629 if ((rdev->family == CHIP_RS100) ||
3630 (rdev->family == CHIP_RS200))
3631 critical_point2 = 0;
3633 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
3634 temp_ff.full = dfixed_const(temp);
3635 temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
3636 if (sclk_ff.full < temp_ff.full)
3637 temp_ff.full = sclk_ff.full;
3639 read_return_rate.full = temp_ff.full;
3642 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
3643 time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
3645 time_disp1_drop_priority.full = 0;
3647 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
3648 crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
3649 crit_point_ff.full += dfixed_const_half(0);
3651 critical_point2 = dfixed_trunc(crit_point_ff);
3653 if (rdev->disp_priority == 2) {
3654 critical_point2 = 0;
3657 if (max_stop_req - critical_point2 < 4)
3658 critical_point2 = 0;
3662 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
3663 /* some R300 cards have a problem with this set to 0 */
3664 critical_point2 = 0x10;
3667 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3668 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3670 if ((rdev->family == CHIP_RS400) ||
3671 (rdev->family == CHIP_RS480)) {
3673 /* attempt to program RS400 disp2 regs correctly ??? */
3674 temp = RREG32(RS400_DISP2_REQ_CNTL1);
3675 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
3676 RS400_DISP2_STOP_REQ_LEVEL_MASK);
3677 WREG32(RS400_DISP2_REQ_CNTL1, (temp |
3678 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3679 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3680 temp = RREG32(RS400_DISP2_REQ_CNTL2);
3681 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
3682 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
3683 WREG32(RS400_DISP2_REQ_CNTL2, (temp |
3684 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
3685 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
3687 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
3688 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
3689 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
3690 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
3693 DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
3694 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
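/*
 * Basic CP sanity test: seed a scratch register with 0xCAFEDEAD, emit
 * a two-dword PACKET0 write of 0xDEADBEEF to it through the ring, and
 * poll until the new value lands, proving the CP fetches and executes
 * commands.
 */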
3698 int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
3705 r = radeon_scratch_get(rdev, &scratch);
3707 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
3710 WREG32(scratch, 0xCAFEDEAD);
3711 r = radeon_ring_lock(rdev, ring, 2);
3713 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3714 radeon_scratch_free(rdev, scratch);
3717 radeon_ring_write(ring, PACKET0(scratch, 0));
3718 radeon_ring_write(ring, 0xDEADBEEF);
3719 radeon_ring_unlock_commit(rdev, ring);
3720 for (i = 0; i < rdev->usec_timeout; i++) {
3721 tmp = RREG32(scratch);
3722 if (tmp == 0xDEADBEEF) {
3727 if (i < rdev->usec_timeout) {
3728 DRM_INFO("ring test succeeded in %d usecs\n", i);
3730 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
3734 radeon_scratch_free(rdev, scratch);
3738 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3740 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3742 if (ring->rptr_save_reg) {
3743 u32 next_rptr = ring->wptr + 2 + 3;
3744 radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
3745 radeon_ring_write(ring, next_rptr);
3748 radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
3749 radeon_ring_write(ring, ib->gpu_addr);
3750 radeon_ring_write(ring, ib->length_dw);
3753 int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3755 struct radeon_ib ib;
3761 r = radeon_scratch_get(rdev, &scratch);
3763 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3766 WREG32(scratch, 0xCAFEDEAD);
3767 r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
3769 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3772 ib.ptr[0] = PACKET0(scratch, 0);
3773 ib.ptr[1] = 0xDEADBEEF;
3774 ib.ptr[2] = PACKET2(0);
3775 ib.ptr[3] = PACKET2(0);
3776 ib.ptr[4] = PACKET2(0);
3777 ib.ptr[5] = PACKET2(0);
3778 ib.ptr[6] = PACKET2(0);
3779 ib.ptr[7] = PACKET2(0);
3781 r = radeon_ib_schedule(rdev, &ib, NULL);
3783 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3786 r = radeon_fence_wait(ib.fence, false);
3788 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3791 for (i = 0; i < rdev->usec_timeout; i++) {
3792 tmp = RREG32(scratch);
3793 if (tmp == 0xDEADBEEF) {
3798 if (i < rdev->usec_timeout) {
3799 DRM_INFO("ib test succeeded in %u usecs\n", i);
3801 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3806 radeon_ib_free(rdev, &ib);
3808 radeon_scratch_free(rdev, scratch);
3812 void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3814 /* Shutdown CP; we shouldn't need to do that, but better safe than sorry. */
3817 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3818 WREG32(R_000740_CP_CSQ_CNTL, 0);
3820 /* Save few CRTC registers */
3821 save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
3822 save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
3823 save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
3824 save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
3825 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3826 save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
3827 save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
3830 /* Disable VGA aperture access */
3831 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
3832 /* Disable cursor, overlay, crtc */
3833 WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
3834 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
3835 S_000054_CRTC_DISPLAY_DIS(1));
3836 WREG32(R_000050_CRTC_GEN_CNTL,
3837 (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
3838 S_000050_CRTC_DISP_REQ_EN_B(1));
3839 WREG32(R_000420_OV0_SCALE_CNTL,
3840 C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
3841 WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
3842 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3843 WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
3844 S_000360_CUR2_LOCK(1));
3845 WREG32(R_0003F8_CRTC2_GEN_CNTL,
3846 (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
3847 S_0003F8_CRTC2_DISPLAY_DIS(1) |
3848 S_0003F8_CRTC2_DISP_REQ_EN_B(1));
3849 WREG32(R_000360_CUR2_OFFSET,
3850 C_000360_CUR2_LOCK & save->CUR2_OFFSET);
3854 void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
3856 /* Update base address for crtc */
3857 WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3858 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3859 WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3861 /* Restore CRTC registers */
3862 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
3863 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
3864 WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
3865 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3866 WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
3870 void r100_vga_render_disable(struct radeon_device *rdev)
3874 tmp = RREG8(R_0003C2_GENMO_WT);
3875 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
3878 static void r100_debugfs(struct radeon_device *rdev)
3882 r = r100_debugfs_mc_info_init(rdev);
3884 dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
3887 static void r100_mc_program(struct radeon_device *rdev)
3889 struct r100_mc_save save;
3891 /* Stops all mc clients */
3892 r100_mc_stop(rdev, &save);
3893 if (rdev->flags & RADEON_IS_AGP) {
3894 WREG32(R_00014C_MC_AGP_LOCATION,
3895 S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
3896 S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
3897 WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
3898 if (rdev->family > CHIP_RV200)
3899 WREG32(R_00015C_AGP_BASE_2,
3900 upper_32_bits(rdev->mc.agp_base) & 0xff);
3902 WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
3903 WREG32(R_000170_AGP_BASE, 0);
3904 if (rdev->family > CHIP_RV200)
3905 WREG32(R_00015C_AGP_BASE_2, 0);
3907 /* Wait for mc idle */
3908 if (r100_mc_wait_for_idle(rdev))
3909 dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
3910 /* Program MC; this should be a 32-bit limited address space */
3911 WREG32(R_000148_MC_FB_LOCATION,
3912 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
3913 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
3914 r100_mc_resume(rdev, &save);
3917 static void r100_clock_startup(struct radeon_device *rdev)
3921 if (radeon_dynclks != -1 && radeon_dynclks)
3922 radeon_legacy_set_clock_gating(rdev, 1);
3923 /* We need to force on some of the block */
3924 tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
3925 tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
3926 if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
3927 tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
3928 WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
3931 static int r100_startup(struct radeon_device *rdev)
3935 /* set common regs */
3936 r100_set_common_regs(rdev);
3938 r100_mc_program(rdev);
3940 r100_clock_startup(rdev);
3941 /* Initialize GART (initialize after TTM so we can allocate
3942 * memory through TTM but finalize after TTM) */
3943 r100_enable_bm(rdev);
3944 if (rdev->flags & RADEON_IS_PCI) {
3945 r = r100_pci_gart_enable(rdev);
3950 /* allocate wb buffer */
3951 r = radeon_wb_init(rdev);
3955 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
3957 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
3963 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
3964 /* 1M ring buffer */
3965 r = r100_cp_init(rdev, 1024 * 1024);
3967 dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
3971 r = radeon_ib_pool_init(rdev);
3973 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
3980 int r100_resume(struct radeon_device *rdev)
3984 /* Make sure the GART is not active */
3985 if (rdev->flags & RADEON_IS_PCI)
3986 r100_pci_gart_disable(rdev);
3987 /* Resume clock before doing reset */
3988 r100_clock_startup(rdev);
3989 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
3990 if (radeon_asic_reset(rdev)) {
3991 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3992 RREG32(R_000E40_RBBM_STATUS),
3993 RREG32(R_0007C0_CP_STAT));
3996 radeon_combios_asic_init(rdev->ddev);
3997 /* Resume clock after posting */
3998 r100_clock_startup(rdev);
3999 /* Initialize surface registers */
4000 radeon_surface_init(rdev);
4002 rdev->accel_working = true;
4003 r = r100_startup(rdev);
4005 rdev->accel_working = false;
4010 int r100_suspend(struct radeon_device *rdev)
4012 r100_cp_disable(rdev);
4013 radeon_wb_disable(rdev);
4014 r100_irq_disable(rdev);
4015 if (rdev->flags & RADEON_IS_PCI)
4016 r100_pci_gart_disable(rdev);
4020 void r100_fini(struct radeon_device *rdev)
4023 radeon_wb_fini(rdev);
4024 radeon_ib_pool_fini(rdev);
4025 radeon_gem_fini(rdev);
4026 if (rdev->flags & RADEON_IS_PCI)
4027 r100_pci_gart_fini(rdev);
4028 radeon_agp_fini(rdev);
4029 radeon_irq_kms_fini(rdev);
4030 radeon_fence_driver_fini(rdev);
4031 radeon_bo_fini(rdev);
4032 radeon_atombios_fini(rdev);
4033 r100_cp_fini_microcode(rdev);
4034 free(rdev->bios, DRM_MEM_DRIVER);
4039 * Due to how kexec works, it can leave the hw fully initialised when it
4040 * boots the new kernel. However doing our init sequence with the CP and
4041 * WB stuff setup causes GPU hangs on the RN50 at least. So at startup
4042 * do some quick sanity checks and restore sane values to avoid this
4045 void r100_restore_sanity(struct radeon_device *rdev)
4049 tmp = RREG32(RADEON_CP_CSQ_CNTL);
4051 WREG32(RADEON_CP_CSQ_CNTL, 0);
4053 tmp = RREG32(RADEON_CP_RB_CNTL);
4055 WREG32(RADEON_CP_RB_CNTL, 0);
4057 tmp = RREG32(RADEON_SCRATCH_UMSK);
4059 WREG32(RADEON_SCRATCH_UMSK, 0);
4063 int r100_init(struct radeon_device *rdev)
4067 /* Register debugfs file specific to this group of asics */
4070 r100_vga_render_disable(rdev);
4071 /* Initialize scratch registers */
4072 radeon_scratch_init(rdev);
4073 /* Initialize surface registers */
4074 radeon_surface_init(rdev);
4075 /* sanity check some register to avoid hangs like after kexec */
4076 r100_restore_sanity(rdev);
4077 /* TODO: disable VGA need to use VGA request */
4079 if (!radeon_get_bios(rdev)) {
4080 if (ASIC_IS_AVIVO(rdev))
4083 if (rdev->is_atom_bios) {
4084 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
4087 r = radeon_combios_init(rdev);
4091 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
4092 if (radeon_asic_reset(rdev)) {
4094 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
4095 RREG32(R_000E40_RBBM_STATUS),
4096 RREG32(R_0007C0_CP_STAT));
4098 /* check if cards are posted or not */
4099 if (radeon_boot_test_post_card(rdev) == false)
4101 /* Set asic errata */
4103 /* Initialize clocks */
4104 radeon_get_clock_info(rdev->ddev);
4105 /* initialize AGP */
4106 if (rdev->flags & RADEON_IS_AGP) {
4107 r = radeon_agp_init(rdev);
4109 radeon_agp_disable(rdev);
4112 /* initialize VRAM */
4115 r = radeon_fence_driver_init(rdev);
4118 r = radeon_irq_kms_init(rdev);
4121 /* Memory manager */
4122 r = radeon_bo_init(rdev);
4125 if (rdev->flags & RADEON_IS_PCI) {
4126 r = r100_pci_gart_init(rdev);
4130 r100_set_safe_registers(rdev);
4132 rdev->accel_working = true;
4133 r = r100_startup(rdev);
4135 /* Something went wrong with the accel init, stop accel */
4136 dev_err(rdev->dev, "Disabling GPU acceleration\n");
4138 radeon_wb_fini(rdev);
4139 radeon_ib_pool_fini(rdev);
4140 radeon_irq_kms_fini(rdev);
4141 if (rdev->flags & RADEON_IS_PCI)
4142 r100_pci_gart_fini(rdev);
4143 rdev->accel_working = false;
4148 uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
4149 bool always_indirect)
4151 if (reg < rdev->rmmio_size && !always_indirect)
4152 return bus_read_4(rdev->rmmio, reg);
4154 unsigned long flags;
4157 DRM_SPINLOCK_IRQSAVE(&rdev->mmio_idx_lock, flags);
4158 bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
4159 ret = bus_read_4(rdev->rmmio, RADEON_MM_DATA);
4160 DRM_SPINUNLOCK_IRQRESTORE(&rdev->mmio_idx_lock, flags);
4166 void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
4167 bool always_indirect)
4169 if (reg < rdev->rmmio_size && !always_indirect)
4170 bus_write_4(rdev->rmmio, reg, v);
4172 unsigned long flags;
4174 DRM_SPINLOCK_IRQSAVE(&rdev->mmio_idx_lock, flags);
4175 bus_write_4(rdev->rmmio, RADEON_MM_INDEX, reg);
4176 bus_write_4(rdev->rmmio, RADEON_MM_DATA, v);
4177 DRM_SPINUNLOCK_IRQRESTORE(&rdev->mmio_idx_lock, flags);
4181 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
4183 if (reg < rdev->rio_mem_size)
4184 return bus_read_4(rdev->rio_mem, reg);
4186 /* XXX No locking? -- dumbbell@ */
4187 bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
4188 return bus_read_4(rdev->rio_mem, RADEON_MM_DATA);
4192 void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
4194 if (reg < rdev->rio_mem_size)
4195 bus_write_4(rdev->rio_mem, reg, v);
4197 /* XXX No locking? -- dumbbell@ */
4198 bus_write_4(rdev->rio_mem, RADEON_MM_INDEX, reg);
4199 bus_write_4(rdev->rio_mem, RADEON_MM_DATA, v);