sys/dev/drm2/radeon/rs600.c (FreeBSD releng/10.0)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 /* RS600 / Radeon X1250/X1270 integrated GPU
29  *
30  * This file gathers the functions specific to the RS600, which is the
31  * IGP of the X1250/X1270 family supporting Intel CPUs (while the
32  * RS690/RS740 is the X1250/X1270 variant supporting AMD CPUs). The
33  * display engine is the Avivo one, the BIOS is an ATOM BIOS, and the
34  * 3D blocks are those of the R4XX family. The GART differs from the
35  * RS400 one and is very close to that of the R600 family (R600 likely
36  * being an evolution of the RS600 GART block).
37  */
38
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
41
42 #include <dev/drm2/drmP.h>
43 #include "radeon.h"
44 #include "radeon_asic.h"
45 #include "atom.h"
46 #include "rs600d.h"
47
48 #include "rs600_reg_safe.h"
49
50 static void rs600_gpu_init(struct radeon_device *rdev);
51
52 static const u32 crtc_offsets[2] =
53 {
54         0,
55         AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
56 };
57
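/*
 * avivo_wait_for_vblank - wait for the next vblank period on a CRTC.
 * If the CRTC is enabled, busy-wait (up to rdev->usec_timeout iterations)
 * for any vblank currently in progress to end, then for the next vblank
 * to begin.  Does nothing for out-of-range or disabled CRTCs.
 */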
58 void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
59 {
60         int i;
61
62         if (crtc >= rdev->num_crtc)
63                 return;
64
65         if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN) {
66                 for (i = 0; i < rdev->usec_timeout; i++) {
67                         if (!(RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK))
68                                 break;
69                         DRM_UDELAY(1);
70                 }
71                 for (i = 0; i < rdev->usec_timeout; i++) {
72                         if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
73                                 break;
74                         DRM_UDELAY(1);
75                 }
76         }
77 }
78
79 void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
80 {
81         /* enable the pflip int */
82         radeon_irq_kms_pflip_irq_get(rdev, crtc);
83 }
84
85 void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
86 {
87         /* disable the pflip int */
88         radeon_irq_kms_pflip_irq_put(rdev, crtc);
89 }
90
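/*
 * rs600_page_flip - program a new scanout base address for a page flip.
 * Locks the D1GRPH update, writes the new primary/secondary surface
 * addresses, waits for SURFACE_UPDATE_PENDING to assert, releases the
 * lock so the flip completes during vblank, and returns the current
 * update-pending status for the caller to poll.
 */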
91 u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
92 {
93         struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
94         u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
95         int i;
96
97         /* Lock the graphics update lock */
98         tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
99         WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
100
101         /* update the scanout addresses */
102         WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
103                (u32)crtc_base);
104         WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
105                (u32)crtc_base);
106
107         /* Wait for update_pending to go high. */
108         for (i = 0; i < rdev->usec_timeout; i++) {
109                 if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
110                         break;
111                 DRM_UDELAY(1);
112         }
113         DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
114
115         /* Unlock the lock, so double-buffering can take place inside vblank */
116         tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
117         WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
118
119         /* Return current update_pending status: */
120         return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
121 }
122
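/*
 * rs600_pm_misc - apply the miscellaneous settings of the requested
 * power state: GPIO/VDDC voltage, reduced SCLK high/low lengths,
 * dynamic voltage drop, HDP clock gating, back-bias and PCIe lane count.
 */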
123 void rs600_pm_misc(struct radeon_device *rdev)
124 {
125         int requested_index = rdev->pm.requested_power_state_index;
126         struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
127         struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
128         u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
129         u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;
130
131         if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
132                 if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
133                         tmp = RREG32(voltage->gpio.reg);
134                         if (voltage->active_high)
135                                 tmp |= voltage->gpio.mask;
136                         else
137                                 tmp &= ~(voltage->gpio.mask);
138                         WREG32(voltage->gpio.reg, tmp);
139                         if (voltage->delay)
140                                 DRM_UDELAY(voltage->delay);
141                 } else {
142                         tmp = RREG32(voltage->gpio.reg);
143                         if (voltage->active_high)
144                                 tmp &= ~voltage->gpio.mask;
145                         else
146                                 tmp |= voltage->gpio.mask;
147                         WREG32(voltage->gpio.reg, tmp);
148                         if (voltage->delay)
149                                 DRM_UDELAY(voltage->delay);
150                 }
151         } else if (voltage->type == VOLTAGE_VDDC)
152                 radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);
153
154         dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
155         dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
156         dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
157         if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
158                 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
159                         dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
160                         dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
161                 } else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
162                         dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
163                         dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
164                 }
165         } else {
166                 dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
167                 dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
168         }
169         WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);
170
171         dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
172         if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
173                 dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
174                 if (voltage->delay) {
175                         dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
176                         dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
177                 } else
178                         dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
179         } else
180                 dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
181         WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);
182
183         hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
184         if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
185                 hdp_dyn_cntl &= ~HDP_FORCEON;
186         else
187                 hdp_dyn_cntl |= HDP_FORCEON;
188         WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
189 #if 0
190         /* mc_host_dyn seems to cause hangs from time to time */
191         mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
192         if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
193                 mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
194         else
195                 mc_host_dyn_cntl |= MC_HOST_FORCEON;
196         WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
197 #endif
198         dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
199         if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
200                 dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
201         else
202                 dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
203         WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);
204
205         /* set pcie lanes */
206         if ((rdev->flags & RADEON_IS_PCIE) &&
207             !(rdev->flags & RADEON_IS_IGP) &&
208             rdev->asic->pm.set_pcie_lanes &&
209             (ps->pcie_lanes !=
210              rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
211                 radeon_set_pcie_lanes(rdev,
212                                       ps->pcie_lanes);
213                 DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
214         }
215 }
216
217 void rs600_pm_prepare(struct radeon_device *rdev)
218 {
219         struct drm_device *ddev = rdev->ddev;
220         struct drm_crtc *crtc;
221         struct radeon_crtc *radeon_crtc;
222         u32 tmp;
223
224         /* disable any active CRTCs */
225         list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
226                 radeon_crtc = to_radeon_crtc(crtc);
227                 if (radeon_crtc->enabled) {
228                         tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
229                         tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
230                         WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
231                 }
232         }
233 }
234
235 void rs600_pm_finish(struct radeon_device *rdev)
236 {
237         struct drm_device *ddev = rdev->ddev;
238         struct drm_crtc *crtc;
239         struct radeon_crtc *radeon_crtc;
240         u32 tmp;
241
242         /* enable any active CRTCs */
243         list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
244                 radeon_crtc = to_radeon_crtc(crtc);
245                 if (radeon_crtc->enabled) {
246                         tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
247                         tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
248                         WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
249                 }
250         }
251 }
252
253 /* hpd for digital panel detect/disconnect */
254 bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
255 {
256         u32 tmp;
257         bool connected = false;
258
259         switch (hpd) {
260         case RADEON_HPD_1:
261                 tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
262                 if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
263                         connected = true;
264                 break;
265         case RADEON_HPD_2:
266                 tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
267                 if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
268                         connected = true;
269                 break;
270         default:
271                 break;
272         }
273         return connected;
274 }
275
276 void rs600_hpd_set_polarity(struct radeon_device *rdev,
277                             enum radeon_hpd_id hpd)
278 {
279         u32 tmp;
280         bool connected = rs600_hpd_sense(rdev, hpd);
281
282         switch (hpd) {
283         case RADEON_HPD_1:
284                 tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
285                 if (connected)
286                         tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
287                 else
288                         tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
289                 WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
290                 break;
291         case RADEON_HPD_2:
292                 tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
293                 if (connected)
294                         tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
295                 else
296                         tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
297                 WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
298                 break;
299         default:
300                 break;
301         }
302 }
303
304 void rs600_hpd_init(struct radeon_device *rdev)
305 {
306         struct drm_device *dev = rdev->ddev;
307         struct drm_connector *connector;
308         unsigned enable = 0;
309
310         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
311                 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
312                 switch (radeon_connector->hpd.hpd) {
313                 case RADEON_HPD_1:
314                         WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
315                                S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
316                         break;
317                 case RADEON_HPD_2:
318                         WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
319                                S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
320                         break;
321                 default:
322                         break;
323                 }
324                 enable |= 1 << radeon_connector->hpd.hpd;
325                 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
326         }
327         radeon_irq_kms_enable_hpd(rdev, enable);
328 }
329
330 void rs600_hpd_fini(struct radeon_device *rdev)
331 {
332         struct drm_device *dev = rdev->ddev;
333         struct drm_connector *connector;
334         unsigned disable = 0;
335
336         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
337                 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
338                 switch (radeon_connector->hpd.hpd) {
339                 case RADEON_HPD_1:
340                         WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
341                                S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
342                         break;
343                 case RADEON_HPD_2:
344                         WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
345                                S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
346                         break;
347                 default:
348                         break;
349                 }
350                 disable |= 1 << radeon_connector->hpd.hpd;
351         }
352         radeon_irq_kms_disable_hpd(rdev, disable);
353 }
354
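/*
 * rs600_asic_reset - soft reset of a hung GPU.
 * Returns immediately if the GUI is idle.  Otherwise stops the MC
 * clients and the CP, pulses RBBM_SOFT_RESET for the GA/VAP, CP and MC
 * blocks in turn, restores the saved PCI state and resumes the MC.
 * Returns 0 if the GA/VAP blocks end up idle, -1 otherwise.
 */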
355 int rs600_asic_reset(struct radeon_device *rdev)
356 {
357         struct rv515_mc_save save;
358         u32 status, tmp;
359         int ret = 0;
360
361         status = RREG32(R_000E40_RBBM_STATUS);
362         if (!G_000E40_GUI_ACTIVE(status)) {
363                 return 0;
364         }
365         /* Stops all mc clients */
366         rv515_mc_stop(rdev, &save);
367         status = RREG32(R_000E40_RBBM_STATUS);
368         dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
369         /* stop CP */
370         WREG32(RADEON_CP_CSQ_CNTL, 0);
371         tmp = RREG32(RADEON_CP_RB_CNTL);
372         WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
373         WREG32(RADEON_CP_RB_RPTR_WR, 0);
374         WREG32(RADEON_CP_RB_WPTR, 0);
375         WREG32(RADEON_CP_RB_CNTL, tmp);
376         pci_save_state(device_get_parent(rdev->dev));
377         /* disable bus mastering */
378         pci_disable_busmaster(rdev->dev);
379         DRM_MDELAY(1);
380         /* reset GA+VAP */
381         WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
382                                         S_0000F0_SOFT_RESET_GA(1));
383         RREG32(R_0000F0_RBBM_SOFT_RESET);
384         DRM_MDELAY(500);
385         WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
386         DRM_MDELAY(1);
387         status = RREG32(R_000E40_RBBM_STATUS);
388         dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
389         /* reset CP */
390         WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
391         RREG32(R_0000F0_RBBM_SOFT_RESET);
392         DRM_MDELAY(500);
393         WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
394         DRM_MDELAY(1);
395         status = RREG32(R_000E40_RBBM_STATUS);
396         dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
397         /* reset MC */
398         WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
399         RREG32(R_0000F0_RBBM_SOFT_RESET);
400         DRM_MDELAY(500);
401         WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
402         DRM_MDELAY(1);
403         status = RREG32(R_000E40_RBBM_STATUS);
404         dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
405         /* restore PCI & busmastering */
406         pci_restore_state(device_get_parent(rdev->dev));
407         /* Check if GPU is idle */
408         if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
409                 dev_err(rdev->dev, "failed to reset GPU\n");
410                 ret = -1;
411         } else
412                 dev_info(rdev->dev, "GPU reset succeeded\n");
413         rv515_mc_resume(rdev, &save);
414         return ret;
415 }
416
417 /*
418  * GART.
419  */
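/*
 * rs600_gart_tlb_flush - invalidate the GART L1 TLBs and L2 cache by
 * toggling the INVALIDATE bits in MC_PT0_CNTL (clear, set, clear) and
 * reading the register back.
 */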
420 void rs600_gart_tlb_flush(struct radeon_device *rdev)
421 {
422         uint32_t tmp;
423
424         tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
425         tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
426         WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
427
428         tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
429         tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
430         WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
431
432         tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
433         tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
434         WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
435         tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
436 }
437
438 static int rs600_gart_init(struct radeon_device *rdev)
439 {
440         int r;
441
442         if (rdev->gart.robj) {
443                 DRM_ERROR("RS600 GART already initialized\n");
444                 return 0;
445         }
446         /* Initialize common gart structure */
447         r = radeon_gart_init(rdev);
448         if (r) {
449                 return r;
450         }
451         rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
452         return radeon_gart_table_vram_alloc(rdev);
453 }
454
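/*
 * rs600_gart_enable - pin the page table in VRAM and program the MC:
 * per-client access modes, the context 0 flat page table base/start/end
 * addresses, the system aperture (mapped onto VRAM), then enable the
 * page tables and flush the TLB.
 */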
455 static int rs600_gart_enable(struct radeon_device *rdev)
456 {
457         u32 tmp;
458         int r, i;
459
460         if (rdev->gart.robj == NULL) {
461                 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
462                 return -EINVAL;
463         }
464         r = radeon_gart_table_vram_pin(rdev);
465         if (r)
466                 return r;
467         radeon_gart_restore(rdev);
468         /* Enable bus master */
469         tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
470         WREG32(RADEON_BUS_CNTL, tmp);
471         /* FIXME: setup default page */
472         WREG32_MC(R_000100_MC_PT0_CNTL,
473                   (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
474                    S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
475
476         for (i = 0; i < 19; i++) {
477                 WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
478                           S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
479                           S_00016C_SYSTEM_ACCESS_MODE_MASK(
480                                   V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
481                           S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
482                                   V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
483                           S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
484                           S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
485                           S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
486         }
487         /* enable first context */
488         WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
489                   S_000102_ENABLE_PAGE_TABLE(1) |
490                   S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
491
492         /* disable all other contexts */
493         for (i = 1; i < 8; i++)
494                 WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
495
496         /* setup the page table */
497         WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
498                   rdev->gart.table_addr);
499         WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
500         WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
501         WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
502
503         /* System context maps to VRAM space */
504         WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
505         WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);
506
507         /* enable page tables */
508         tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
509         WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
510         tmp = RREG32_MC(R_000009_MC_CNTL1);
511         WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
512         rs600_gart_tlb_flush(rdev);
513         DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
514                  (unsigned)(rdev->mc.gtt_size >> 20),
515                  (unsigned long long)rdev->gart.table_addr);
516         rdev->gart.ready = true;
517         return 0;
518 }
519
520 static void rs600_gart_disable(struct radeon_device *rdev)
521 {
522         u32 tmp;
523
524         /* FIXME: disable out of gart access */
525         WREG32_MC(R_000100_MC_PT0_CNTL, 0);
526         tmp = RREG32_MC(R_000009_MC_CNTL1);
527         WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
528         radeon_gart_table_vram_unpin(rdev);
529 }
530
531 static void rs600_gart_fini(struct radeon_device *rdev)
532 {
533         radeon_gart_fini(rdev);
534         rs600_gart_disable(rdev);
535         radeon_gart_table_vram_free(rdev);
536 }
537
538 #define R600_PTE_VALID     (1 << 0)
539 #define R600_PTE_SYSTEM    (1 << 1)
540 #define R600_PTE_SNOOPED   (1 << 2)
541 #define R600_PTE_READABLE  (1 << 5)
542 #define R600_PTE_WRITEABLE (1 << 6)
543
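/*
 * rs600_gart_set_page - write a single GART page table entry.
 * The RS600 uses R600-style PTEs: the page-aligned system address is
 * OR'ed with the VALID, SYSTEM, SNOOPED, READABLE and WRITEABLE flags
 * and stored directly in the flat page table at rdev->gart.ptr.
 */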
544 int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
545 {
546         uint64_t *ptr = rdev->gart.ptr;
547
548         if (i < 0 || i > rdev->gart.num_gpu_pages) {
549                 return -EINVAL;
550         }
551         addr = addr & 0xFFFFFFFFFFFFF000ULL;
552         addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
553         addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
554         ptr[i] = addr;
555         return 0;
556 }
557
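/*
 * rs600_irq_set - program the interrupt enable registers from the
 * current rdev->irq state: SW (fence) interrupt, per-CRTC vblank/pflip
 * masks, the two hot-plug detect lines and, on DCE2 parts, the HDMI
 * audio format-change trigger.
 */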
558 int rs600_irq_set(struct radeon_device *rdev)
559 {
560         uint32_t tmp = 0;
561         uint32_t mode_int = 0;
562         u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
563                 ~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
564         u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
565                 ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
566         u32 hdmi0;
567         if (ASIC_IS_DCE2(rdev))
568                 hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
569                         ~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
570         else
571                 hdmi0 = 0;
572
573         if (!rdev->irq.installed) {
574                 DRM_ERROR("Can't enable IRQ/MSI because no handler is installed\n");
575                 WREG32(R_000040_GEN_INT_CNTL, 0);
576                 return -EINVAL;
577         }
578         if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
579                 tmp |= S_000040_SW_INT_EN(1);
580         }
581         if (rdev->irq.crtc_vblank_int[0] ||
582             atomic_read(&rdev->irq.pflip[0])) {
583                 mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
584         }
585         if (rdev->irq.crtc_vblank_int[1] ||
586             atomic_read(&rdev->irq.pflip[1])) {
587                 mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
588         }
589         if (rdev->irq.hpd[0]) {
590                 hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
591         }
592         if (rdev->irq.hpd[1]) {
593                 hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
594         }
595         if (rdev->irq.afmt[0]) {
596                 hdmi0 |= S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
597         }
598         WREG32(R_000040_GEN_INT_CNTL, tmp);
599         WREG32(R_006540_DxMODE_INT_MASK, mode_int);
600         WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
601         WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
602         if (ASIC_IS_DCE2(rdev))
603                 WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
604         return 0;
605 }
606
607 static inline u32 rs600_irq_ack(struct radeon_device *rdev)
608 {
609         uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
610         uint32_t irq_mask = S_000044_SW_INT(1);
611         u32 tmp;
612
613         if (G_000044_DISPLAY_INT_STAT(irqs)) {
614                 rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
615                 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
616                         WREG32(R_006534_D1MODE_VBLANK_STATUS,
617                                 S_006534_D1MODE_VBLANK_ACK(1));
618                 }
619                 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
620                         WREG32(R_006D34_D2MODE_VBLANK_STATUS,
621                                 S_006D34_D2MODE_VBLANK_ACK(1));
622                 }
623                 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
624                         tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
625                         tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
626                         WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
627                 }
628                 if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
629                         tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
630                         tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
631                         WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
632                 }
633         } else {
634                 rdev->irq.stat_regs.r500.disp_int = 0;
635         }
636
637         if (ASIC_IS_DCE2(rdev)) {
638                 rdev->irq.stat_regs.r500.hdmi0_status = RREG32(R_007404_HDMI0_STATUS) &
639                         S_007404_HDMI0_AZ_FORMAT_WTRIG(1);
640                 if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
641                         tmp = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL);
642                         tmp |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1);
643                         WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, tmp);
644                 }
645         } else
646                 rdev->irq.stat_regs.r500.hdmi0_status = 0;
647
648         if (irqs) {
649                 WREG32(R_000044_GEN_INT_STATUS, irqs);
650         }
651         return irqs & irq_mask;
652 }
653
654 void rs600_irq_disable(struct radeon_device *rdev)
655 {
656         u32 hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
657                 ~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
658         WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
659         WREG32(R_000040_GEN_INT_CNTL, 0);
660         WREG32(R_006540_DxMODE_INT_MASK, 0);
661         /* Wait and acknowledge irq */
662         DRM_MDELAY(1);
663         rs600_irq_ack(rdev);
664 }
665
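/*
 * rs600_irq_process - top-level interrupt handler.
 * Repeatedly acknowledges and services SW (fence), vblank/page-flip,
 * hot-plug and HDMI interrupts until no source remains, then queues the
 * hotplug/audio tasks and rearms MSI delivery if it is enabled.
 */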
666 irqreturn_t rs600_irq_process(struct radeon_device *rdev)
667 {
668         u32 status, msi_rearm;
669         bool queue_hotplug = false;
670         bool queue_hdmi = false;
671
672         status = rs600_irq_ack(rdev);
673         if (!status &&
674             !rdev->irq.stat_regs.r500.disp_int &&
675             !rdev->irq.stat_regs.r500.hdmi0_status) {
676                 return IRQ_NONE;
677         }
678         while (status ||
679                rdev->irq.stat_regs.r500.disp_int ||
680                rdev->irq.stat_regs.r500.hdmi0_status) {
681                 /* SW interrupt */
682                 if (G_000044_SW_INT(status)) {
683                         radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
684                 }
685                 /* Vertical blank interrupts */
686                 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
687                         if (rdev->irq.crtc_vblank_int[0]) {
688                                 drm_handle_vblank(rdev->ddev, 0);
689                                 rdev->pm.vblank_sync = true;
690                                 DRM_WAKEUP(&rdev->irq.vblank_queue);
691                         }
692                         if (atomic_read(&rdev->irq.pflip[0]))
693                                 radeon_crtc_handle_flip(rdev, 0);
694                 }
695                 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
696                         if (rdev->irq.crtc_vblank_int[1]) {
697                                 drm_handle_vblank(rdev->ddev, 1);
698                                 rdev->pm.vblank_sync = true;
699                                 DRM_WAKEUP(&rdev->irq.vblank_queue);
700                         }
701                         if (atomic_read(&rdev->irq.pflip[1]))
702                                 radeon_crtc_handle_flip(rdev, 1);
703                 }
704                 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
705                         queue_hotplug = true;
706                         DRM_DEBUG("HPD1\n");
707                 }
708                 if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
709                         queue_hotplug = true;
710                         DRM_DEBUG("HPD2\n");
711                 }
712                 if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
713                         queue_hdmi = true;
714                         DRM_DEBUG("HDMI0\n");
715                 }
716                 status = rs600_irq_ack(rdev);
717         }
718         if (queue_hotplug)
719                 taskqueue_enqueue(rdev->tq, &rdev->hotplug_work);
720         if (queue_hdmi)
721                 taskqueue_enqueue(rdev->tq, &rdev->audio_work);
722         if (rdev->msi_enabled) {
723                 switch (rdev->family) {
724                 case CHIP_RS600:
725                 case CHIP_RS690:
726                 case CHIP_RS740:
727                         msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
728                         WREG32(RADEON_BUS_CNTL, msi_rearm);
729                         WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
730                         break;
731                 default:
732                         WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
733                         break;
734                 }
735         }
736         return IRQ_HANDLED;
737 }
738
739 u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
740 {
741         if (crtc == 0)
742                 return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
743         else
744                 return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
745 }
746
747 int rs600_mc_wait_for_idle(struct radeon_device *rdev)
748 {
749         unsigned i;
750
751         for (i = 0; i < rdev->usec_timeout; i++) {
752                 if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
753                         return 0;
754                 DRM_UDELAY(1);
755         }
756         return -1;
757 }
758
759 static void rs600_gpu_init(struct radeon_device *rdev)
760 {
761         r420_pipes_init(rdev);
762         /* Wait for mc idle */
763         if (rs600_mc_wait_for_idle(rdev))
764                 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
765 }
766
767 static void rs600_mc_init(struct radeon_device *rdev)
768 {
769         u64 base;
770
771         rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
772         rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
773         rdev->mc.vram_is_ddr = true;
774         rdev->mc.vram_width = 128;
775         rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
776         rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
777         rdev->mc.visible_vram_size = rdev->mc.aper_size;
778         rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
779         base = RREG32_MC(R_000004_MC_FB_LOCATION);
780         base = G_000004_MC_FB_START(base) << 16;
781         radeon_vram_location(rdev, &rdev->mc, base);
782         rdev->mc.gtt_base_align = 0;
783         radeon_gtt_location(rdev, &rdev->mc);
784         radeon_update_bandwidth_info(rdev);
785 }
786
787 void rs600_bandwidth_update(struct radeon_device *rdev)
788 {
789         struct drm_display_mode *mode0 = NULL;
790         struct drm_display_mode *mode1 = NULL;
791         u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
792         /* FIXME: implement full support */
793
794         radeon_update_display_priority(rdev);
795
796         if (rdev->mode_info.crtcs[0]->base.enabled)
797                 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
798         if (rdev->mode_info.crtcs[1]->base.enabled)
799                 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
800
801         rs690_line_buffer_adjust(rdev, mode0, mode1);
802
803         if (rdev->disp_priority == 2) {
804                 d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
805                 d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
806                 d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
807                 d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
808                 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
809                 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
810                 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
811                 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
812         }
813 }
814
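/*
 * rs600_mc_rreg/rs600_mc_wreg - indirect MC register access through the
 * MC_IND_INDEX/MC_IND_DATA pair; writes additionally set MC_IND_WR_EN
 * in the index register.
 */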
815 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
816 {
817         WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
818                 S_000070_MC_IND_CITF_ARB0(1));
819         return RREG32(R_000074_MC_IND_DATA);
820 }
821
822 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
823 {
824         WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
825                 S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
826         WREG32(R_000074_MC_IND_DATA, v);
827 }
828
829 static void rs600_debugfs(struct radeon_device *rdev)
830 {
831         if (r100_debugfs_rbbm_init(rdev))
832                 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
833 }
834
835 void rs600_set_safe_registers(struct radeon_device *rdev)
836 {
837         rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
838         rdev->config.r300.reg_safe_bm_size = DRM_ARRAY_SIZE(rs600_reg_safe_bm);
839 }
840
841 static void rs600_mc_program(struct radeon_device *rdev)
842 {
843         struct rv515_mc_save save;
844
845         /* Stops all mc clients */
846         rv515_mc_stop(rdev, &save);
847
848         /* Wait for mc idle */
849         if (rs600_mc_wait_for_idle(rdev))
850                 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
851
852         /* FIXME: What does AGP mean for such a chipset? */
853         WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
854         WREG32_MC(R_000006_AGP_BASE, 0);
855         WREG32_MC(R_000007_AGP_BASE_2, 0);
856         /* Program MC */
857         WREG32_MC(R_000004_MC_FB_LOCATION,
858                         S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
859                         S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
860         WREG32(R_000134_HDP_FB_LOCATION,
861                 S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
862
863         rv515_mc_resume(rdev, &save);
864 }
865
866 static int rs600_startup(struct radeon_device *rdev)
867 {
868         int r;
869
870         rs600_mc_program(rdev);
871         /* Resume clock */
872         rv515_clock_startup(rdev);
873         /* Initialize GPU configuration (# pipes, ...) */
874         rs600_gpu_init(rdev);
875         /* Initialize GART (initialize after TTM so we can allocate
876          * memory through TTM but finalize after TTM) */
877         r = rs600_gart_enable(rdev);
878         if (r)
879                 return r;
880
881         /* allocate wb buffer */
882         r = radeon_wb_init(rdev);
883         if (r)
884                 return r;
885
886         r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
887         if (r) {
888                 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
889                 return r;
890         }
891
892         /* Enable IRQ */
893         rs600_irq_set(rdev);
894         rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
895         /* 1M ring buffer */
896         r = r100_cp_init(rdev, 1024 * 1024);
897         if (r) {
898                 dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
899                 return r;
900         }
901
902         r = radeon_ib_pool_init(rdev);
903         if (r) {
904                 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
905                 return r;
906         }
907
908         r = r600_audio_init(rdev);
909         if (r) {
910                 dev_err(rdev->dev, "failed initializing audio\n");
911                 return r;
912         }
913
914         return 0;
915 }
916
917 int rs600_resume(struct radeon_device *rdev)
918 {
919         int r;
920
921         /* Make sure the GART is disabled */
922         rs600_gart_disable(rdev);
923         /* Resume clock before doing reset */
924         rv515_clock_startup(rdev);
925         /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
926         if (radeon_asic_reset(rdev)) {
927                 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
928                         RREG32(R_000E40_RBBM_STATUS),
929                         RREG32(R_0007C0_CP_STAT));
930         }
931         /* post */
932         atom_asic_init(rdev->mode_info.atom_context);
933         /* Resume clock after posting */
934         rv515_clock_startup(rdev);
935         /* Initialize surface registers */
936         radeon_surface_init(rdev);
937
938         rdev->accel_working = true;
939         r = rs600_startup(rdev);
940         if (r) {
941                 rdev->accel_working = false;
942         }
943         return r;
944 }
945
946 int rs600_suspend(struct radeon_device *rdev)
947 {
948         r600_audio_fini(rdev);
949         r100_cp_disable(rdev);
950         radeon_wb_disable(rdev);
951         rs600_irq_disable(rdev);
952         rs600_gart_disable(rdev);
953         return 0;
954 }
955
956 void rs600_fini(struct radeon_device *rdev)
957 {
958         r600_audio_fini(rdev);
959         r100_cp_fini(rdev);
960         radeon_wb_fini(rdev);
961         radeon_ib_pool_fini(rdev);
962         radeon_gem_fini(rdev);
963         rs600_gart_fini(rdev);
964         radeon_irq_kms_fini(rdev);
965         radeon_fence_driver_fini(rdev);
966         radeon_bo_fini(rdev);
967         radeon_atombios_fini(rdev);
968         free(rdev->bios, DRM_MEM_DRIVER);
969         rdev->bios = NULL;
970 }
971
972 int rs600_init(struct radeon_device *rdev)
973 {
974         int r;
975
976         /* Disable VGA */
977         rv515_vga_render_disable(rdev);
978         /* Initialize scratch registers */
979         radeon_scratch_init(rdev);
980         /* Initialize surface registers */
981         radeon_surface_init(rdev);
982         /* restore some register to sane defaults */
983         r100_restore_sanity(rdev);
984         /* BIOS */
985         if (!radeon_get_bios(rdev)) {
986                 if (ASIC_IS_AVIVO(rdev))
987                         return -EINVAL;
988         }
989         if (rdev->is_atom_bios) {
990                 r = radeon_atombios_init(rdev);
991                 if (r)
992                         return r;
993         } else {
994                 dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
995                 return -EINVAL;
996         }
997         /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
998         if (radeon_asic_reset(rdev)) {
999                 dev_warn(rdev->dev,
1000                         "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
1001                         RREG32(R_000E40_RBBM_STATUS),
1002                         RREG32(R_0007C0_CP_STAT));
1003         }
1004         /* check whether the card is posted */
1005         if (radeon_boot_test_post_card(rdev) == false)
1006                 return -EINVAL;
1007
1008         /* Initialize clocks */
1009         radeon_get_clock_info(rdev->ddev);
1010         /* initialize memory controller */
1011         rs600_mc_init(rdev);
1012         rs600_debugfs(rdev);
1013         /* Fence driver */
1014         r = radeon_fence_driver_init(rdev);
1015         if (r)
1016                 return r;
1017         r = radeon_irq_kms_init(rdev);
1018         if (r)
1019                 return r;
1020         /* Memory manager */
1021         r = radeon_bo_init(rdev);
1022         if (r)
1023                 return r;
1024         r = rs600_gart_init(rdev);
1025         if (r)
1026                 return r;
1027         rs600_set_safe_registers(rdev);
1028
1029         rdev->accel_working = true;
1030         r = rs600_startup(rdev);
1031         if (r) {
1032                 /* Something went wrong with the accel init; stop acceleration */
1033                 dev_err(rdev->dev, "Disabling GPU acceleration\n");
1034                 r100_cp_fini(rdev);
1035                 radeon_wb_fini(rdev);
1036                 radeon_ib_pool_fini(rdev);
1037                 rs600_gart_fini(rdev);
1038                 radeon_irq_kms_fini(rdev);
1039                 rdev->accel_working = false;
1040         }
1041         return 0;
1042 }