/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bus.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/agpio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <pci/agppriv.h>
#include <pci/agpvar.h>
#include <pci/agpreg.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>

#include <machine/md_var.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
MODULE_VERSION(agp, 1);

MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");
static d_open_t agp_open;
static d_close_t agp_close;
static d_ioctl_t agp_ioctl;
static d_mmap_t agp_mmap;

static struct cdevsw agp_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      D_NEEDGIANT,
        .d_open =       agp_open,
        .d_close =      agp_close,
        .d_ioctl =      agp_ioctl,
        .d_mmap =       agp_mmap,
        .d_name =       "agp",
};

static devclass_t agp_devclass;
#define KDEV2DEV(kdev)  devclass_get_device(agp_devclass, minor(kdev))
/* Helper functions for implementing chipset mini drivers. */

void
agp_flush_cache()
{
#if defined(__i386__) || defined(__amd64__)
        wbinvd();
#endif
}

u_int8_t
agp_find_caps(device_t dev)
{
        int capreg;

        if (pci_find_extcap(dev, PCIY_AGP, &capreg) != 0)
                capreg = 0;
        return (capreg);
}
/*
 * Find an AGP display device (if any).
 */
static device_t
agp_find_display(void)
{
        devclass_t pci = devclass_find("pci");
        device_t bus, dev = 0;
        device_t *kids;
        int busnum, numkids, i;

        for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
                bus = devclass_get_device(pci, busnum);
                if (!bus)
                        continue;
                device_get_children(bus, &kids, &numkids);
                for (i = 0; i < numkids; i++) {
                        dev = kids[i];
                        if (pci_get_class(dev) == PCIC_DISPLAY
                            && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
                                if (agp_find_caps(dev)) {
                                        free(kids, M_TEMP);
                                        return dev;
                                }
                }
                free(kids, M_TEMP);
        }

        return 0;
}
struct agp_gatt *
agp_alloc_gatt(device_t dev)
{
        u_int32_t apsize = AGP_GET_APERTURE(dev);
        u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
        struct agp_gatt *gatt;

        if (bootverbose)
                device_printf(dev,
                              "allocating GATT for aperture of size %dM\n",
                              apsize / (1024*1024));

        if (entries == 0) {
                device_printf(dev, "bad aperture size\n");
                return NULL;
        }

        gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
        if (!gatt)
                return 0;

        gatt->ag_entries = entries;
        gatt->ag_virtual = contigmalloc(entries * sizeof(u_int32_t), M_AGP, 0,
                                        0, ~0, PAGE_SIZE, 0);
        if (!gatt->ag_virtual) {
                if (bootverbose)
                        device_printf(dev, "contiguous allocation failed\n");
                free(gatt, M_AGP);
                return 0;
        }
        bzero(gatt->ag_virtual, entries * sizeof(u_int32_t));
        gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);
        agp_flush_cache();

        return gatt;
}
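
/*
 * Usage sketch (hypothetical register name): a chipset mini driver
 * normally calls agp_alloc_gatt() from its attach routine and points
 * the chipset's translation-table base register at ag_physical:
 *
 *      sc->gatt = agp_alloc_gatt(dev);
 *      if (sc->gatt == NULL)
 *              return ENOMEM;
 *      pci_write_config(dev, AGP_XXX_ATTBASE, sc->gatt->ag_physical, 4);
 *
 * AGP_XXX_ATTBASE stands in for a chipset-specific register; see the
 * agp_*.c mini drivers for concrete examples.
 */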
void
agp_free_gatt(struct agp_gatt *gatt)
{
        contigfree(gatt->ag_virtual,
                   gatt->ag_entries * sizeof(u_int32_t), M_AGP);
        free(gatt, M_AGP);
}
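
/*
 * Maximum AGP allocation as a function of installed RAM, borrowed
 * from the Linux driver's heuristic: agp_max[i][0] is a system
 * memory threshold in MB and agp_max[i][1] is the corresponding cap
 * on AGP memory, also in MB.  For example, a 128MB machine may
 * dedicate at most 96MB to AGP.
 */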
static u_int agp_max[][2] = {
        {0,     0},
        {32,    4},
        {64,    28},
        {128,   96},
        {256,   204},
        {512,   440},
        {1024,  942},
        {2048,  1920},
        {4096,  3932}
};
#define agp_max_size    (sizeof(agp_max) / sizeof(agp_max[0]))
int
agp_generic_attach(device_t dev)
{
        struct agp_softc *sc = device_get_softc(dev);
        int rid, memsize, i;

        /*
         * Find and map the aperture.
         */
        rid = AGP_APBASE;
        sc->as_aperture = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 0);
        if (!sc->as_aperture)
                return ENOMEM;

        /*
         * Work out an upper bound for agp memory allocation. This
         * uses a heuristic table from the Linux driver.
         */
        memsize = ptoa(Maxmem) >> 20;
        for (i = 0; i < agp_max_size; i++) {
                if (memsize <= agp_max[i][0])
                        break;
        }
        if (i == agp_max_size)
                i = agp_max_size - 1;
        sc->as_maxmem = agp_max[i][1] << 20U;

        /*
         * The lock is used to prevent re-entry to
         * agp_generic_bind_memory() since that function can sleep.
         */
        mtx_init(&sc->as_lock, "agp lock", NULL, MTX_DEF);

        /*
         * Initialise stuff for the userland device.
         */
        agp_devclass = devclass_find("agp");
        TAILQ_INIT(&sc->as_memory);
        sc->as_nextid = 1;

        sc->as_devnode = make_dev(&agp_cdevsw,
                                  device_get_unit(dev),
                                  UID_ROOT,
                                  GID_WHEEL,
                                  0600,
                                  "agpgart");

        return 0;
}
int
agp_generic_detach(device_t dev)
{
        struct agp_softc *sc = device_get_softc(dev);

        destroy_dev(sc->as_devnode);
        bus_release_resource(dev, SYS_RES_MEMORY, AGP_APBASE, sc->as_aperture);
        mtx_destroy(&sc->as_lock);
        agp_flush_cache();
        return 0;
}
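
/*
 * Mode negotiation for agp_v2_enable() and agp_v3_enable() below
 * combines three mode words: the target's (bridge) status register,
 * the master's (display) status register and the mode requested by
 * the caller.  The request-queue depth is the minimum of the three,
 * boolean features such as SBA and FW are the logical AND, and the
 * data rate is the highest rate that all three advertise.
 */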
/*
 * This does the enable logic for v3, with the same topology
 * restrictions as in place for v2 -- one bus, one device on the bus.
 */
static int
agp_v3_enable(device_t dev, device_t mdev, u_int32_t mode)
{
        u_int32_t tstatus, mstatus;
        u_int32_t command;
        int rq, sba, fw, rate, arqsz, cal;

        tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
        mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

        /* Set RQ to the min of mode, tstatus and mstatus */
        rq = AGP_MODE_GET_RQ(mode);
        if (AGP_MODE_GET_RQ(tstatus) < rq)
                rq = AGP_MODE_GET_RQ(tstatus);
        if (AGP_MODE_GET_RQ(mstatus) < rq)
                rq = AGP_MODE_GET_RQ(mstatus);

        /*
         * ARQSZ - Set the value to the maximum one.
         * Don't allow the mode register to override values.
         */
        arqsz = AGP_MODE_GET_ARQSZ(mode);
        if (AGP_MODE_GET_ARQSZ(tstatus) > arqsz)
                arqsz = AGP_MODE_GET_ARQSZ(tstatus);
        if (AGP_MODE_GET_ARQSZ(mstatus) > arqsz)
                arqsz = AGP_MODE_GET_ARQSZ(mstatus);

        /* Calibration cycle - don't allow override by mode register */
        cal = AGP_MODE_GET_CAL(tstatus);
        if (AGP_MODE_GET_CAL(mstatus) < cal)
                cal = AGP_MODE_GET_CAL(mstatus);

        /* SBA must be supported for AGP v3. */
        sba = 1;

        /* Set FW if all three support it. */
        fw = (AGP_MODE_GET_FW(tstatus)
              & AGP_MODE_GET_FW(mstatus)
              & AGP_MODE_GET_FW(mode));

        /* Figure out the max rate */
        rate = (AGP_MODE_GET_RATE(tstatus)
                & AGP_MODE_GET_RATE(mstatus)
                & AGP_MODE_GET_RATE(mode));
        if (rate & AGP_MODE_V3_RATE_8x)
                rate = AGP_MODE_V3_RATE_8x;
        else
                rate = AGP_MODE_V3_RATE_4x;
        if (bootverbose)
                device_printf(dev, "Setting AGP v3 mode %d\n", rate * 4);

        pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, 0, 4);

        /* Construct the new mode word and tell the hardware */
        command = AGP_MODE_SET_RQ(0, rq);
        command = AGP_MODE_SET_ARQSZ(command, arqsz);
        command = AGP_MODE_SET_CAL(command, cal);
        command = AGP_MODE_SET_SBA(command, sba);
        command = AGP_MODE_SET_FW(command, fw);
        command = AGP_MODE_SET_RATE(command, rate);
        command = AGP_MODE_SET_MODE_3(command, 1);
        command = AGP_MODE_SET_AGP(command, 1);
        pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
        pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

        return 0;
}
static int
agp_v2_enable(device_t dev, device_t mdev, u_int32_t mode)
{
        u_int32_t tstatus, mstatus;
        u_int32_t command;
        int rq, sba, fw, rate;

        tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
        mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

        /* Set RQ to the min of mode, tstatus and mstatus */
        rq = AGP_MODE_GET_RQ(mode);
        if (AGP_MODE_GET_RQ(tstatus) < rq)
                rq = AGP_MODE_GET_RQ(tstatus);
        if (AGP_MODE_GET_RQ(mstatus) < rq)
                rq = AGP_MODE_GET_RQ(mstatus);

        /* Set SBA if all three can deal with SBA */
        sba = (AGP_MODE_GET_SBA(tstatus)
               & AGP_MODE_GET_SBA(mstatus)
               & AGP_MODE_GET_SBA(mode));

        /* Similar for FW */
        fw = (AGP_MODE_GET_FW(tstatus)
              & AGP_MODE_GET_FW(mstatus)
              & AGP_MODE_GET_FW(mode));

        /* Figure out the max rate */
        rate = (AGP_MODE_GET_RATE(tstatus)
                & AGP_MODE_GET_RATE(mstatus)
                & AGP_MODE_GET_RATE(mode));
        if (rate & AGP_MODE_V2_RATE_4x)
                rate = AGP_MODE_V2_RATE_4x;
        else if (rate & AGP_MODE_V2_RATE_2x)
                rate = AGP_MODE_V2_RATE_2x;
        else
                rate = AGP_MODE_V2_RATE_1x;
        if (bootverbose)
                device_printf(dev, "Setting AGP v2 mode %d\n", rate);

        /* Construct the new mode word and tell the hardware */
        command = AGP_MODE_SET_RQ(0, rq);
        command = AGP_MODE_SET_SBA(command, sba);
        command = AGP_MODE_SET_FW(command, fw);
        command = AGP_MODE_SET_RATE(command, rate);
        command = AGP_MODE_SET_AGP(command, 1);
        pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
        pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

        return 0;
}
int
agp_generic_enable(device_t dev, u_int32_t mode)
{
        device_t mdev = agp_find_display();
        u_int32_t tstatus, mstatus;

        if (!mdev) {
                AGP_DPF("can't find display\n");
                return ENXIO;
        }

        tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
        mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

        /*
         * Check display and bridge for AGP v3 support.  AGP v3 allows
         * more variety in topology than v2, e.g. multiple AGP devices
         * attached to one bridge, or multiple AGP bridges in one
         * system.  This doesn't attempt to address those situations,
         * but should work fine for a classic single AGP slot system
         * with AGP v3.
         */
        if (AGP_MODE_GET_MODE_3(mode) &&
            AGP_MODE_GET_MODE_3(tstatus) &&
            AGP_MODE_GET_MODE_3(mstatus))
                return (agp_v3_enable(dev, mdev, mode));
        else
                return (agp_v2_enable(dev, mdev, mode));
}
struct agp_memory *
agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
{
        struct agp_softc *sc = device_get_softc(dev);
        struct agp_memory *mem;

        if ((size & (AGP_PAGE_SIZE - 1)) != 0)
                return 0;

        if (sc->as_allocated + size > sc->as_maxmem)
                return 0;

        if (type != 0) {
                printf("agp_generic_alloc_memory: unsupported type %d\n",
                       type);
                return 0;
        }

        mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
        mem->am_id = sc->as_nextid++;
        mem->am_size = size;
        mem->am_type = 0;
        mem->am_obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(size)));
        mem->am_physical = 0;
        mem->am_offset = 0;
        mem->am_is_bound = 0;
        TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
        sc->as_allocated += size;

        return mem;
}
int
agp_generic_free_memory(device_t dev, struct agp_memory *mem)
{
        struct agp_softc *sc = device_get_softc(dev);

        if (mem->am_is_bound)
                return EBUSY;

        sc->as_allocated -= mem->am_size;
        TAILQ_REMOVE(&sc->as_memory, mem, am_link);
        vm_object_deallocate(mem->am_obj);
        free(mem, M_AGP);
        return 0;
}
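
/*
 * agp_generic_bind_memory() proceeds in two phases: the backing
 * pages are grabbed and wired first, without as_lock held, since
 * vm_page_grab() may sleep; then, under as_lock, one GATT entry is
 * installed per AGP page and the CPU cache and chipset TLB are
 * flushed so the new translations take effect.
 */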
int
agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
                        vm_offset_t offset)
{
        struct agp_softc *sc = device_get_softc(dev);
        vm_offset_t i, j, k;
        vm_page_t m;
        int error;

        /* Do some sanity checks first. */
        if (offset < 0 || (offset & (AGP_PAGE_SIZE - 1)) != 0 ||
            offset + mem->am_size > AGP_GET_APERTURE(dev)) {
                device_printf(dev, "binding memory at bad offset %#x\n",
                    (int)offset);
                return EINVAL;
        }

        /*
         * Allocate the pages early, before acquiring the lock,
         * because vm_page_grab() used with VM_ALLOC_RETRY may
         * block and we can't hold a mutex while blocking.
         */
        VM_OBJECT_LOCK(mem->am_obj);
        for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
                /*
                 * Find a page from the object and wire it
                 * down. This page will be mapped using one or more
                 * entries in the GATT (assuming that PAGE_SIZE >=
                 * AGP_PAGE_SIZE. If this is the first call to bind,
                 * the pages will be allocated and zeroed.
                 */
                m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
                    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
                AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));
        }
        VM_OBJECT_UNLOCK(mem->am_obj);

        mtx_lock(&sc->as_lock);

        if (mem->am_is_bound) {
                device_printf(dev, "memory already bound\n");
                error = EINVAL;
                VM_OBJECT_LOCK(mem->am_obj);
                goto bad;
        }

        /*
         * Bind the individual pages and flush the chipset's
         * TLB.
         */
        VM_OBJECT_LOCK(mem->am_obj);
        for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
                m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));

                /*
                 * Install entries in the GATT, making sure that if
                 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
                 * aligned to PAGE_SIZE, we don't modify too many GATT
                 * entries.
                 */
                for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
                     j += AGP_PAGE_SIZE) {
                        vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
                        AGP_DPF("binding offset %#x to pa %#x\n",
                                offset + i + j, pa);
                        error = AGP_BIND_PAGE(dev, offset + i + j, pa);
                        if (error) {
                                /*
                                 * Bail out. Reverse all the mappings
                                 * and unwire the pages.
                                 */
                                vm_page_wakeup(m);
                                for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
                                        AGP_UNBIND_PAGE(dev, offset + k);
                                goto bad;
                        }
                }
                vm_page_wakeup(m);
        }
        VM_OBJECT_UNLOCK(mem->am_obj);

        /*
         * Flush the cpu cache since we are providing a new mapping
         * for these pages.
         */
        agp_flush_cache();

        /*
         * Make sure the chipset gets the new mappings.
         */
        AGP_FLUSH_TLB(dev);

        mem->am_offset = offset;
        mem->am_is_bound = 1;
        mtx_unlock(&sc->as_lock);
        return 0;
bad:
        mtx_unlock(&sc->as_lock);
        VM_OBJECT_LOCK_ASSERT(mem->am_obj, MA_OWNED);
        for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
                m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));
                vm_page_lock_queues();
                vm_page_unwire(m, 0);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(mem->am_obj);

        return error;
}
int
agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
{
        struct agp_softc *sc = device_get_softc(dev);
        vm_page_t m;
        int i;

        mtx_lock(&sc->as_lock);

        if (!mem->am_is_bound) {
                device_printf(dev, "memory is not bound\n");
                mtx_unlock(&sc->as_lock);
                return EINVAL;
        }

        /*
         * Unbind the individual pages and flush the chipset's
         * TLB. Unwire the pages so they can be swapped.
         */
        for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
                AGP_UNBIND_PAGE(dev, mem->am_offset + i);
        VM_OBJECT_LOCK(mem->am_obj);
        for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
                m = vm_page_lookup(mem->am_obj, atop(i));
                vm_page_lock_queues();
                vm_page_unwire(m, 0);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(mem->am_obj);

        agp_flush_cache();
        AGP_FLUSH_TLB(dev);

        mem->am_offset = 0;
        mem->am_is_bound = 0;

        mtx_unlock(&sc->as_lock);

        return 0;
}
/* Helper functions for implementing user/kernel api */

static int
agp_acquire_helper(device_t dev, enum agp_acquire_state state)
{
        struct agp_softc *sc = device_get_softc(dev);

        if (sc->as_state != AGP_ACQUIRE_FREE)
                return EBUSY;
        sc->as_state = state;

        return 0;
}

static int
agp_release_helper(device_t dev, enum agp_acquire_state state)
{
        struct agp_softc *sc = device_get_softc(dev);

        if (sc->as_state == AGP_ACQUIRE_FREE)
                return 0;

        if (sc->as_state != state)
                return EBUSY;

        sc->as_state = AGP_ACQUIRE_FREE;
        return 0;
}
static struct agp_memory *
agp_find_memory(device_t dev, int id)
{
        struct agp_softc *sc = device_get_softc(dev);
        struct agp_memory *mem;

        AGP_DPF("searching for memory block %d\n", id);
        TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
                AGP_DPF("considering memory block %d\n", mem->am_id);
                if (mem->am_id == id)
                        return mem;
        }
        return 0;
}
/* Implementation of the userland ioctl api */

static int
agp_info_user(device_t dev, agp_info *info)
{
        struct agp_softc *sc = device_get_softc(dev);

        bzero(info, sizeof *info);
        info->bridge_id = pci_get_devid(dev);
        info->agp_mode =
            pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
        info->aper_base = rman_get_start(sc->as_aperture);
        info->aper_size = AGP_GET_APERTURE(dev) >> 20;
        info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
        info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;

        return 0;
}
static int
agp_setup_user(device_t dev, agp_setup *setup)
{
        return AGP_ENABLE(dev, setup->agp_mode);
}
static int
agp_allocate_user(device_t dev, agp_allocate *alloc)
{
        struct agp_memory *mem;

        mem = AGP_ALLOC_MEMORY(dev,
                               alloc->type,
                               alloc->pg_count << AGP_PAGE_SHIFT);
        if (mem) {
                alloc->key = mem->am_id;
                alloc->physical = mem->am_physical;
                return 0;
        } else {
                return ENOMEM;
        }
}
static int
agp_deallocate_user(device_t dev, int id)
{
        struct agp_memory *mem = agp_find_memory(dev, id);

        if (mem) {
                AGP_FREE_MEMORY(dev, mem);
                return 0;
        } else {
                return ENOENT;
        }
}
static int
agp_bind_user(device_t dev, agp_bind *bind)
{
        struct agp_memory *mem = agp_find_memory(dev, bind->key);

        if (!mem)
                return ENOENT;

        return AGP_BIND_MEMORY(dev, mem, bind->pg_start << AGP_PAGE_SHIFT);
}
static int
agp_unbind_user(device_t dev, agp_unbind *unbind)
{
        struct agp_memory *mem = agp_find_memory(dev, unbind->key);

        if (!mem)
                return ENOENT;

        return AGP_UNBIND_MEMORY(dev, mem);
}
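
/*
 * Character device entry points backing /dev/agpgart.  Opening the
 * node marks the device busy so the driver cannot be detached while
 * userland holds it; the last close undoes this, frees any memory
 * blocks the user left behind and drops a user-mode acquire if one
 * is still held.
 */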
static int
agp_open(struct cdev *kdev, int oflags, int devtype, struct thread *td)
{
        device_t dev = KDEV2DEV(kdev);
        struct agp_softc *sc = device_get_softc(dev);

        if (!sc->as_isopen) {
                sc->as_isopen = 1;
                device_busy(dev);
        }

        return 0;
}
static int
agp_close(struct cdev *kdev, int fflag, int devtype, struct thread *td)
{
        device_t dev = KDEV2DEV(kdev);
        struct agp_softc *sc = device_get_softc(dev);
        struct agp_memory *mem;

        /*
         * Clear the GATT and force release on last close
         */
        while ((mem = TAILQ_FIRST(&sc->as_memory)) != 0) {
                if (mem->am_is_bound)
                        AGP_UNBIND_MEMORY(dev, mem);
                AGP_FREE_MEMORY(dev, mem);
        }
        if (sc->as_state == AGP_ACQUIRE_USER)
                agp_release_helper(dev, AGP_ACQUIRE_USER);
        sc->as_isopen = 0;
        device_unbusy(dev);

        return 0;
}
static int
agp_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
        device_t dev = KDEV2DEV(kdev);

        switch (cmd) {
        case AGPIOC_INFO:
                return agp_info_user(dev, (agp_info *) data);

        case AGPIOC_ACQUIRE:
                return agp_acquire_helper(dev, AGP_ACQUIRE_USER);

        case AGPIOC_RELEASE:
                return agp_release_helper(dev, AGP_ACQUIRE_USER);

        case AGPIOC_SETUP:
                return agp_setup_user(dev, (agp_setup *)data);

        case AGPIOC_ALLOCATE:
                return agp_allocate_user(dev, (agp_allocate *)data);

        case AGPIOC_DEALLOCATE:
                return agp_deallocate_user(dev, *(int *) data);

        case AGPIOC_BIND:
                return agp_bind_user(dev, (agp_bind *)data);

        case AGPIOC_UNBIND:
                return agp_unbind_user(dev, (agp_unbind *)data);
        }

        return EINVAL;
}
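
/*
 * Userland usage sketch (error handling omitted; the argument types
 * come from <sys/agpio.h>):
 *
 *      int fd = open("/dev/agpgart", O_RDWR);
 *      agp_info info;
 *      ioctl(fd, AGPIOC_INFO, &info);
 *      ioctl(fd, AGPIOC_ACQUIRE, 0);
 *      agp_setup setup = { .agp_mode = info.agp_mode };
 *      ioctl(fd, AGPIOC_SETUP, &setup);
 *      ...allocate, bind, use, unbind, deallocate...
 *      ioctl(fd, AGPIOC_RELEASE, 0);
 *      close(fd);
 */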
static int
agp_mmap(struct cdev *kdev, vm_offset_t offset, vm_paddr_t *paddr, int prot)
{
        device_t dev = KDEV2DEV(kdev);
        struct agp_softc *sc = device_get_softc(dev);

        if (offset > AGP_GET_APERTURE(dev))
                return -1;
        *paddr = rman_get_start(sc->as_aperture) + offset;
        return 0;
}
/* Implementation of the kernel api */

device_t
agp_find_device()
{
        device_t *children, child;
        int i, count;

        if (!agp_devclass)
                return NULL;
        if (devclass_get_devices(agp_devclass, &children, &count) != 0)
                return NULL;
        child = NULL;
        for (i = 0; i < count; i++) {
                if (device_is_attached(children[i])) {
                        child = children[i];
                        break;
                }
        }
        free(children, M_TEMP);
        return child;
}
enum agp_acquire_state
agp_state(device_t dev)
{
        struct agp_softc *sc = device_get_softc(dev);
        return sc->as_state;
}
void
agp_get_info(device_t dev, struct agp_info *info)
{
        struct agp_softc *sc = device_get_softc(dev);

        info->ai_mode =
                pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
        info->ai_aperture_base = rman_get_start(sc->as_aperture);
        info->ai_aperture_size = rman_get_size(sc->as_aperture);
        info->ai_memory_allowed = sc->as_maxmem;
        info->ai_memory_used = sc->as_allocated;
}
int
agp_acquire(device_t dev)
{
        return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_release(device_t dev)
{
        return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_enable(device_t dev, u_int32_t mode)
{
        return AGP_ENABLE(dev, mode);
}

void *
agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
{
        return (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
}

void
agp_free_memory(device_t dev, void *handle)
{
        struct agp_memory *mem = (struct agp_memory *) handle;
        AGP_FREE_MEMORY(dev, mem);
}

int
agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
{
        struct agp_memory *mem = (struct agp_memory *) handle;
        return AGP_BIND_MEMORY(dev, mem, offset);
}

int
agp_unbind_memory(device_t dev, void *handle)
{
        struct agp_memory *mem = (struct agp_memory *) handle;
        return AGP_UNBIND_MEMORY(dev, mem);
}

void
agp_memory_info(device_t dev, void *handle, struct agp_memory_info *mi)
{
        struct agp_memory *mem = (struct agp_memory *) handle;

        mi->ami_size = mem->am_size;
        mi->ami_physical = mem->am_physical;
        mi->ami_offset = mem->am_offset;
        mi->ami_is_bound = mem->am_is_bound;
}
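
/*
 * Kernel API usage sketch (e.g. from a DRM driver; error handling
 * omitted):
 *
 *      device_t agpdev = agp_find_device();
 *      agp_acquire(agpdev);
 *      agp_enable(agpdev, mode);
 *      void *handle = agp_alloc_memory(agpdev, 0, size);
 *      agp_bind_memory(agpdev, handle, offset);
 *      ...
 *      agp_unbind_memory(agpdev, handle);
 *      agp_free_memory(agpdev, handle);
 *      agp_release(agpdev);
 */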