/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole" and always try to allocate
 * right after it. The principle is that in a linear GPU ring
 * progression, whatever comes after the last allocated bo is the oldest
 * bo we allocated, and thus the first one that should no longer be in
 * use by the GPU.
 *
 * If that is not the case, we skip over the bo after last to the
 * closest completed bo, if one exists. If none exists and we are not
 * asked to block, we report failure to allocate.
 *
 * If we are asked to block, we collect the oldest fence of each ring
 * and wait for any one of those fences to complete.
 */
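
/* For example, with three live allocations a, b and c made in that
 * order, "olist" and the hole look like this (illustrative sketch only;
 * "^" marks the bo the hole pointer follows, new allocations go right
 * after it and wrap around to the start of the buffer once the end is
 * reached):
 *
 *	olist:  [ a ][ b ][ c ][      free      ]
 *	                    ^hole
 *
 * Freed bos whose fence has not signaled yet are parked on the per-ring
 * "flist" lists and reclaimed once the GPU is done with them.
 */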
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include "radeon.h"

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
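
/**
 * radeon_sa_bo_manager_init - create a new sub-allocation manager
 *
 * @rdev: radeon device
 * @sa_manager: manager to initialize
 * @size: size of the backing bo in bytes
 * @domain: memory domain to pin the backing bo into at start time
 *
 * Initializes the lock, the condvar and the allocation lists, and
 * creates the backing bo.
 */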
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
			      struct radeon_sa_manager *sa_manager,
			      unsigned size, u32 domain)
{
	int i, r;

	sx_init(&sa_manager->wq_lock, "drm__radeon_sa_manager_wq_mtx");
	cv_init(&sa_manager->wq, "drm__radeon_sa_manager__wq");
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}
	r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return 0;
}
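
/**
 * radeon_sa_bo_manager_fini - tear down a sub-allocation manager
 *
 * @rdev: radeon device
 * @sa_manager: manager to tear down
 *
 * Reclaims what the GPU is done with, warns if allocations are still
 * outstanding, and releases the backing bo and synchronization objects.
 */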
void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		radeon_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		radeon_sa_bo_remove_locked(sa_bo);
	}
	radeon_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
	cv_destroy(&sa_manager->wq);
	sx_destroy(&sa_manager->wq_lock);
}
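
/**
 * radeon_sa_bo_manager_start - pin and map the backing bo
 *
 * @rdev: radeon device
 * @sa_manager: manager to start
 *
 * Pins the backing bo into the manager's domain and maps it, giving
 * sub-allocations both a GPU and a CPU address.
 */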
int radeon_sa_bo_manager_start(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}
	/* map the buffer */
	r = radeon_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		radeon_bo_unreserve(sa_manager->bo);
		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	radeon_bo_unreserve(sa_manager->bo);
	return r;
}
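
/**
 * radeon_sa_bo_manager_suspend - unmap and unpin the backing bo
 *
 * @rdev: radeon device
 * @sa_manager: manager to suspend
 */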
int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
				 struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = radeon_bo_reserve(sa_manager->bo, false);
	if (!r) {
		radeon_bo_kunmap(sa_manager->bo);
		radeon_bo_unpin(sa_manager->bo);
		radeon_bo_unreserve(sa_manager->bo);
	}
	return r;
}
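
/* Unlink @sa_bo from the allocation and fence lists, move the hole back
 * if it pointed at this bo, and free it. Called with wq_lock held.
 */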
static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
{
	struct radeon_sa_manager *sa_manager = sa_bo->manager;
	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	radeon_fence_unref(&sa_bo->fence);
	free(sa_bo, DRM_MEM_DRIVER);
}
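
/* Walk the bos right after the hole and reclaim every one whose fence
 * has already signaled, growing the hole. Called with wq_lock held.
 */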
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
			return;
		}
		radeon_sa_bo_remove_locked(sa_bo);
	}
}
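
/* The hole spans from the end offset of the bo "hole" points to (or 0
 * when the list is empty) up to the start offset of the next bo (or the
 * buffer size when there is none).
 */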
static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
	}
	return 0;
}
static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}
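
/* Try to fit @size bytes aligned to @align into the current hole; on
 * success link @sa_bo in after the hole and make it the new hole.
 * Called with wq_lock held.
 */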
static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
				   struct radeon_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}
/**
 * radeon_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly.
 */
static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!list_empty(&sa_manager->flist[i])) {
			return true;
		}
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}
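
/* Move the hole forward past the closest already-signaled bo across all
 * rings, remembering unsignaled fences in @fences so the caller can wait
 * on them. Returns true if the hole moved and the allocation is worth
 * retrying. Called with wq_lock held.
 */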
static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
				   struct radeon_fence **fences,
				   unsigned *tries)
{
	struct radeon_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the bo closest
	 * to the current last
	 */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct radeon_sa_bo, flist);

		if (!radeon_fence_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		++tries[best_bo->fence->ring];
		sa_manager->hole = best_bo->olist.prev;

		/* we knew that this one is signaled,
		 * so it's safe to remove it
		 */
		radeon_sa_bo_remove_locked(best_bo);
		return true;
	}

	return false;
}
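
/**
 * radeon_sa_bo_new - sub-allocate a buffer
 *
 * @rdev: radeon device
 * @sa_manager: manager to allocate from
 * @sa_bo: filled with the new allocation on success
 * @size: number of bytes to allocate
 * @align: alignment to match, at most RADEON_GPU_PAGE_SIZE
 * @block: if true, sleep until space frees up instead of failing
 *
 * Illustrative usage (a sketch only; real callers pick their own
 * manager, sizes and fences):
 *
 *	struct radeon_sa_bo *bo;
 *	int r;
 *
 *	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &bo, 64, 8, true);
 *	if (r == 0) {
 *		... let the GPU use it, then release it against a fence ...
 *		radeon_sa_bo_free(rdev, &bo, fence);
 *	}
 */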
int radeon_sa_bo_new(struct radeon_device *rdev,
		     struct radeon_sa_manager *sa_manager,
		     struct radeon_sa_bo **sa_bo,
		     unsigned size, unsigned align, bool block)
{
	struct radeon_fence *fences[RADEON_NUM_RINGS];
	unsigned tries[RADEON_NUM_RINGS];
	int i, r;

	KASSERT(align <= RADEON_GPU_PAGE_SIZE, ("align > RADEON_GPU_PAGE_SIZE"));
	KASSERT(size <= sa_manager->size, ("size > sa_manager->size"));

	/* M_WAITOK guarantees a non-NULL return; the check is kept defensively */
	*sa_bo = malloc(sizeof(struct radeon_sa_bo), DRM_MEM_DRIVER, M_WAITOK);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	sx_xlock(&sa_manager->wq_lock);
	do {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			radeon_sa_bo_try_free(sa_manager);

			if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				sx_xunlock(&sa_manager->wq_lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
		sx_xunlock(&sa_manager->wq_lock);
		r = radeon_fence_wait_any(rdev, fences, false);
		sx_xlock(&sa_manager->wq_lock);
		/* if we have nothing to wait for, block */
		if (r == -ENOENT && block) {
			while (!radeon_sa_event(sa_manager, size, align)) {
				r = -cv_wait_sig(&sa_manager->wq,
				    &sa_manager->wq_lock);
				if (r != 0)
					break;
			}
		} else if (r == -ENOENT) {
			r = -ENOMEM;
		}
	} while (!r);

	sx_xunlock(&sa_manager->wq_lock);
	free(*sa_bo, DRM_MEM_DRIVER);
	*sa_bo = NULL;
	return r;
}
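
/**
 * radeon_sa_bo_free - release a sub-allocation
 *
 * @rdev: radeon device
 * @sa_bo: allocation to release, set to NULL on return
 * @fence: fence the GPU must signal before the space can be reused,
 *	or NULL if the allocation is already idle
 */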
void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
		       struct radeon_fence *fence)
{
	struct radeon_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	sx_xlock(&sa_manager->wq_lock);
	if (fence && !radeon_fence_signaled(fence)) {
		(*sa_bo)->fence = radeon_fence_ref(fence);
		list_add_tail(&(*sa_bo)->flist,
			      &sa_manager->flist[fence->ring]);
	} else {
		radeon_sa_bo_remove_locked(*sa_bo);
	}
	cv_broadcast(&sa_manager->wq);
	sx_xunlock(&sa_manager->wq_lock);
	*sa_bo = NULL;
}
#if defined(CONFIG_DEBUG_FS)
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct radeon_sa_bo *i;

	sx_xlock(&sa_manager->wq_lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%08x 0x%08x] size %8d",
			   i->soffset, i->eoffset, i->eoffset - i->soffset);
		if (i->fence) {
			seq_printf(m, " protected by 0x%016llx on ring %d",
				   i->fence->seq, i->fence->ring);
		}
		seq_printf(m, "\n");
	}
	sx_xunlock(&sa_manager->wq_lock);
}
#endif