 * Copyright (c) 2000 Doug Rabson
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/ioccom.h>
#include <sys/agpio.h>
#include <sys/mutex.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <pci/agppriv.h>
#include <pci/agpvar.h>
#include <pci/agpreg.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <machine/md_var.h>
#include <machine/bus.h>
#include <machine/resource.h>
MODULE_VERSION(agp, 1);
MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");
static d_open_t agp_open;
static d_close_t agp_close;
static d_ioctl_t agp_ioctl;
static d_mmap_t agp_mmap;
static struct cdevsw agp_cdevsw = {
        .d_version = D_VERSION,
        .d_flags = D_NEEDGIANT,
static devclass_t agp_devclass;
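/*
 * The userland device node is created with the bridge's unit number as
 * its minor, so KDEV2DEV() can recover the device_t with a simple
 * devclass lookup.
 */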
#define KDEV2DEV(kdev) devclass_get_device(agp_devclass, minor(kdev))
/* Helper functions for implementing chipset mini drivers. */
#if defined(__i386__) || defined(__amd64__)
agp_find_caps(device_t dev)
        if (pci_find_extcap(dev, PCIY_AGP, &capreg) != 0)
 * Find an AGP display device (if any).
agp_find_display(void)
        devclass_t pci = devclass_find("pci");
        device_t bus, dev = 0;
        int busnum, numkids, i;
        for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
                bus = devclass_get_device(pci, busnum);
                device_get_children(bus, &kids, &numkids);
                for (i = 0; i < numkids; i++) {
                        if (pci_get_class(dev) == PCIC_DISPLAY
                            && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
                                if (agp_find_caps(dev)) {
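/*
 * The GATT (graphics address translation table) is a physically
 * contiguous array with one 32-bit physical page address per AGP page
 * in the aperture.  The chipset walks the table by physical address,
 * which is why agp_alloc_gatt() below records both the kernel mapping
 * (ag_virtual) and its physical address (ag_physical).
 */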
agp_alloc_gatt(device_t dev)
        u_int32_t apsize = AGP_GET_APERTURE(dev);
        u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
        struct agp_gatt *gatt;
                    "allocating GATT for aperture of size %dM\n",
                    apsize / (1024*1024));
                device_printf(dev, "bad aperture size\n");
        gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
        gatt->ag_entries = entries;
        gatt->ag_virtual = contigmalloc(entries * sizeof(u_int32_t), M_AGP, 0,
            0, ~0, PAGE_SIZE, 0);
        if (!gatt->ag_virtual) {
                device_printf(dev, "contiguous allocation failed\n");
        bzero(gatt->ag_virtual, entries * sizeof(u_int32_t));
        gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);
agp_free_gatt(struct agp_gatt *gatt)
        contigfree(gatt->ag_virtual,
            gatt->ag_entries * sizeof(u_int32_t), M_AGP);
static int agp_max[][2] = {
#define agp_max_size (sizeof(agp_max) / sizeof(agp_max[0]))
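/*
 * Each agp_max[] row pairs an amount of system memory (MB) with the
 * maximum amount of AGP memory to allow for that configuration (MB);
 * agp_generic_attach() below picks the first row whose first column is
 * at least the installed memory size.
 */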
agp_generic_attach(device_t dev)
        struct agp_softc *sc = device_get_softc(dev);
         * Find and map the aperture.
        sc->as_aperture = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 0);
        if (!sc->as_aperture)
         * Work out an upper bound for agp memory allocation. This
         * uses a heuristic table from the Linux driver.
        memsize = ptoa(Maxmem) >> 20;
        for (i = 0; i < agp_max_size; i++) {
                if (memsize <= agp_max[i][0])
        if (i == agp_max_size)
                i = agp_max_size - 1;
        sc->as_maxmem = agp_max[i][1] << 20U;
         * The lock is used to prevent re-entry to
         * agp_generic_bind_memory() since that function can sleep.
        mtx_init(&sc->as_lock, "agp lock", NULL, MTX_DEF);
         * Initialise stuff for the userland device.
        agp_devclass = devclass_find("agp");
        TAILQ_INIT(&sc->as_memory);
        sc->as_devnode = make_dev(&agp_cdevsw,
            device_get_unit(dev),
agp_generic_detach(device_t dev)
        struct agp_softc *sc = device_get_softc(dev);
        destroy_dev(sc->as_devnode);
        bus_release_resource(dev, SYS_RES_MEMORY, AGP_APBASE, sc->as_aperture);
        mtx_destroy(&sc->as_lock);
 * This does the enable logic for v3, with the same topology
 * restrictions as in place for v2 -- one bus, one device on the bus.
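 *
 * The command word written below takes the smallest RQ of the three
 * mode words (target status, master status and the requested mode), the
 * largest ARQSZ, the smallest CAL and the highest rate advertised by
 * all three; SBA is mandatory for v3, and FW is set only when target,
 * master and mode all support it.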
agp_v3_enable(device_t dev, device_t mdev, u_int32_t mode)
        u_int32_t tstatus, mstatus;
        int rq, sba, fw, rate, arqsz, cal;
        tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
        mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);
        /* Set RQ to the min of mode, tstatus and mstatus */
        rq = AGP_MODE_GET_RQ(mode);
        if (AGP_MODE_GET_RQ(tstatus) < rq)
                rq = AGP_MODE_GET_RQ(tstatus);
        if (AGP_MODE_GET_RQ(mstatus) < rq)
                rq = AGP_MODE_GET_RQ(mstatus);
         * ARQSZ - Set the value to the maximum one.
         * Don't allow the mode register to override values.
        arqsz = AGP_MODE_GET_ARQSZ(mode);
        if (AGP_MODE_GET_ARQSZ(tstatus) > arqsz)
                arqsz = AGP_MODE_GET_ARQSZ(tstatus);
        if (AGP_MODE_GET_ARQSZ(mstatus) > arqsz)
                arqsz = AGP_MODE_GET_ARQSZ(mstatus);
        /* Calibration cycle - don't allow override by mode register */
        cal = AGP_MODE_GET_CAL(tstatus);
        if (AGP_MODE_GET_CAL(mstatus) < cal)
                cal = AGP_MODE_GET_CAL(mstatus);
        /* SBA must be supported for AGP v3. */
        /* Set FW if all three support it. */
        fw = (AGP_MODE_GET_FW(tstatus)
            & AGP_MODE_GET_FW(mstatus)
            & AGP_MODE_GET_FW(mode));
        /* Figure out the max rate */
        rate = (AGP_MODE_GET_RATE(tstatus)
            & AGP_MODE_GET_RATE(mstatus)
            & AGP_MODE_GET_RATE(mode));
        if (rate & AGP_MODE_V3_RATE_8x)
                rate = AGP_MODE_V3_RATE_8x;
        else
                rate = AGP_MODE_V3_RATE_4x;
        device_printf(dev, "Setting AGP v3 mode %d\n", rate * 4);
        pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, 0, 4);
        /* Construct the new mode word and tell the hardware */
        command = AGP_MODE_SET_RQ(0, rq);
        command = AGP_MODE_SET_ARQSZ(command, arqsz);
        command = AGP_MODE_SET_CAL(command, cal);
        command = AGP_MODE_SET_SBA(command, sba);
        command = AGP_MODE_SET_FW(command, fw);
        command = AGP_MODE_SET_RATE(command, rate);
        command = AGP_MODE_SET_MODE_3(command, 1);
        command = AGP_MODE_SET_AGP(command, 1);
        pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
        pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);
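/*
 * AGP v2 negotiation below is the simpler variant of the above: RQ is
 * again the minimum of the three mode words, SBA and FW are enabled
 * only when the target, the master and the requested mode all support
 * them, and the rate is the highest one common to all three (4x, then
 * 2x, then 1x).
 */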
agp_v2_enable(device_t dev, device_t mdev, u_int32_t mode)
        u_int32_t tstatus, mstatus;
        int rq, sba, fw, rate;
        tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
        mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);
        /* Set RQ to the min of mode, tstatus and mstatus */
        rq = AGP_MODE_GET_RQ(mode);
        if (AGP_MODE_GET_RQ(tstatus) < rq)
                rq = AGP_MODE_GET_RQ(tstatus);
        if (AGP_MODE_GET_RQ(mstatus) < rq)
                rq = AGP_MODE_GET_RQ(mstatus);
        /* Set SBA if all three can deal with SBA */
        sba = (AGP_MODE_GET_SBA(tstatus)
            & AGP_MODE_GET_SBA(mstatus)
            & AGP_MODE_GET_SBA(mode));
        fw = (AGP_MODE_GET_FW(tstatus)
            & AGP_MODE_GET_FW(mstatus)
            & AGP_MODE_GET_FW(mode));
        /* Figure out the max rate */
        rate = (AGP_MODE_GET_RATE(tstatus)
            & AGP_MODE_GET_RATE(mstatus)
            & AGP_MODE_GET_RATE(mode));
        if (rate & AGP_MODE_V2_RATE_4x)
                rate = AGP_MODE_V2_RATE_4x;
        else if (rate & AGP_MODE_V2_RATE_2x)
                rate = AGP_MODE_V2_RATE_2x;
        else
                rate = AGP_MODE_V2_RATE_1x;
        device_printf(dev, "Setting AGP v2 mode %d\n", rate);
        /* Construct the new mode word and tell the hardware */
        command = AGP_MODE_SET_RQ(0, rq);
        command = AGP_MODE_SET_SBA(command, sba);
        command = AGP_MODE_SET_FW(command, fw);
        command = AGP_MODE_SET_RATE(command, rate);
        command = AGP_MODE_SET_AGP(command, 1);
        pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
        pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);
agp_generic_enable(device_t dev, u_int32_t mode)
        device_t mdev = agp_find_display();
        u_int32_t tstatus, mstatus;
                AGP_DPF("can't find display\n");
        tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
        mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);
         * Check display and bridge for AGP v3 support. AGP v3 allows
         * more variety in topology than v2, e.g. multiple AGP devices
         * attached to one bridge, or multiple AGP bridges in one
         * system. This doesn't attempt to address those situations,
         * but should work fine for a classic single AGP slot system
        if (AGP_MODE_GET_MODE_3(mode) &&
            AGP_MODE_GET_MODE_3(tstatus) &&
            AGP_MODE_GET_MODE_3(mstatus))
                return (agp_v3_enable(dev, mdev, mode));
        return (agp_v2_enable(dev, mdev, mode));
agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
        struct agp_softc *sc = device_get_softc(dev);
        struct agp_memory *mem;
        if ((size & (AGP_PAGE_SIZE - 1)) != 0)
        if (sc->as_allocated + size > sc->as_maxmem)
                printf("agp_generic_alloc_memory: unsupported type %d\n",
        mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
        mem->am_id = sc->as_nextid++;
        mem->am_obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(size)));
        mem->am_physical = 0;
        mem->am_is_bound = 0;
        TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
        sc->as_allocated += size;
agp_generic_free_memory(device_t dev, struct agp_memory *mem)
        struct agp_softc *sc = device_get_softc(dev);
        if (mem->am_is_bound)
        sc->as_allocated -= mem->am_size;
        TAILQ_REMOVE(&sc->as_memory, mem, am_link);
        vm_object_deallocate(mem->am_obj);
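/*
 * agp_generic_bind_memory() wires the backing pages before taking
 * as_lock (vm_page_grab() can sleep), then installs one GATT entry per
 * AGP page; if any AGP_BIND_PAGE() call fails it reverses the partial
 * bindings and unwires every page again before returning the error.
 */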
agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
        struct agp_softc *sc = device_get_softc(dev);
        /* Do some sanity checks first. */
        if (offset < 0 || (offset & (AGP_PAGE_SIZE - 1)) != 0 ||
            offset + mem->am_size > AGP_GET_APERTURE(dev)) {
                device_printf(dev, "binding memory at bad offset %#x\n",
         * Allocate the pages early, before acquiring the lock,
         * because vm_page_grab() used with VM_ALLOC_RETRY may
         * block and we can't hold a mutex while blocking.
        VM_OBJECT_LOCK(mem->am_obj);
        for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
                 * Find a page from the object and wire it
                 * down. This page will be mapped using one or more
                 * entries in the GATT (assuming that PAGE_SIZE >=
                 * AGP_PAGE_SIZE). If this is the first call to bind,
                 * the pages will be allocated and zeroed.
                m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
                    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
                AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));
        VM_OBJECT_UNLOCK(mem->am_obj);
        mtx_lock(&sc->as_lock);
        if (mem->am_is_bound) {
                device_printf(dev, "memory already bound\n");
                VM_OBJECT_LOCK(mem->am_obj);
         * Bind the individual pages and flush the chipset's
        VM_OBJECT_LOCK(mem->am_obj);
        for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
                m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));
                 * Install entries in the GATT, making sure that if
                 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
                 * aligned to PAGE_SIZE, we don't modify too many GATT
                for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
                    j += AGP_PAGE_SIZE) {
                        vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
                        AGP_DPF("binding offset %#x to pa %#x\n",
                        error = AGP_BIND_PAGE(dev, offset + i + j, pa);
                                 * Bail out. Reverse all the mappings
                                 * and unwire the pages.
                                vm_page_lock_queues();
                                vm_page_unlock_queues();
                                for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
                                        AGP_UNBIND_PAGE(dev, offset + k);
                vm_page_lock_queues();
                vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(mem->am_obj);
         * Flush the cpu cache since we are providing a new mapping
         * Make sure the chipset gets the new mappings.
        mem->am_offset = offset;
        mem->am_is_bound = 1;
        mtx_unlock(&sc->as_lock);
        mtx_unlock(&sc->as_lock);
        VM_OBJECT_LOCK_ASSERT(mem->am_obj, MA_OWNED);
        for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
                m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));
                vm_page_lock_queues();
                vm_page_unwire(m, 0);
                vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(mem->am_obj);
agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
        struct agp_softc *sc = device_get_softc(dev);
        mtx_lock(&sc->as_lock);
        if (!mem->am_is_bound) {
                device_printf(dev, "memory is not bound\n");
                mtx_unlock(&sc->as_lock);
         * Unbind the individual pages and flush the chipset's
         * TLB. Unwire the pages so they can be swapped.
        for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
                AGP_UNBIND_PAGE(dev, mem->am_offset + i);
        VM_OBJECT_LOCK(mem->am_obj);
        for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
                m = vm_page_lookup(mem->am_obj, atop(i));
                vm_page_lock_queues();
                vm_page_unwire(m, 0);
                vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(mem->am_obj);
        mem->am_is_bound = 0;
        mtx_unlock(&sc->as_lock);
/* Helper functions for implementing user/kernel api */
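/*
 * The bridge is a single-owner resource: as_state records whether it is
 * free, acquired by the kernel (e.g. a DRM driver) or acquired via the
 * userland device, and the helpers below reject mismatched
 * acquire/release sequences.
 */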
agp_acquire_helper(device_t dev, enum agp_acquire_state state)
        struct agp_softc *sc = device_get_softc(dev);
        if (sc->as_state != AGP_ACQUIRE_FREE)
        sc->as_state = state;
agp_release_helper(device_t dev, enum agp_acquire_state state)
        struct agp_softc *sc = device_get_softc(dev);
        if (sc->as_state == AGP_ACQUIRE_FREE)
        if (sc->as_state != state)
        sc->as_state = AGP_ACQUIRE_FREE;
static struct agp_memory *
agp_find_memory(device_t dev, int id)
        struct agp_softc *sc = device_get_softc(dev);
        struct agp_memory *mem;
        AGP_DPF("searching for memory block %d\n", id);
        TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
                AGP_DPF("considering memory block %d\n", mem->am_id);
                if (mem->am_id == id)
/* Implementation of the userland ioctl api */
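/*
 * A rough sketch of the sequence a userland client is expected to
 * follow (illustrative only: the device node name, the AGPIOC_ACQUIRE /
 * AGPIOC_SETUP / AGPIOC_RELEASE requests and the error handling are
 * assumptions based on the standard agpio.h interface, not shown in
 * this fragment):
 *
 *      int fd = open("/dev/agpgart", O_RDWR);
 *      ioctl(fd, AGPIOC_ACQUIRE, 0);
 *      agp_setup setup = { .agp_mode = mode };
 *      ioctl(fd, AGPIOC_SETUP, &setup);
 *      agp_allocate alloc = { .pg_count = npages };
 *      ioctl(fd, AGPIOC_ALLOCATE, &alloc);
 *      agp_bind bind = { .key = alloc.key, .pg_start = 0 };
 *      ioctl(fd, AGPIOC_BIND, &bind);
 *      ... mmap() the aperture and use it ...
 *      ioctl(fd, AGPIOC_RELEASE, 0);
 *      close(fd);
 */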
agp_info_user(device_t dev, agp_info *info)
        struct agp_softc *sc = device_get_softc(dev);
        bzero(info, sizeof *info);
        info->bridge_id = pci_get_devid(dev);
            pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
        info->aper_base = rman_get_start(sc->as_aperture);
        info->aper_size = AGP_GET_APERTURE(dev) >> 20;
        info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
        info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;
agp_setup_user(device_t dev, agp_setup *setup)
        return AGP_ENABLE(dev, setup->agp_mode);
agp_allocate_user(device_t dev, agp_allocate *alloc)
        struct agp_memory *mem;
        mem = AGP_ALLOC_MEMORY(dev,
            alloc->pg_count << AGP_PAGE_SHIFT);
                alloc->key = mem->am_id;
                alloc->physical = mem->am_physical;
agp_deallocate_user(device_t dev, int id)
        struct agp_memory *mem = agp_find_memory(dev, id);
                AGP_FREE_MEMORY(dev, mem);
agp_bind_user(device_t dev, agp_bind *bind)
        struct agp_memory *mem = agp_find_memory(dev, bind->key);
        return AGP_BIND_MEMORY(dev, mem, bind->pg_start << AGP_PAGE_SHIFT);
agp_unbind_user(device_t dev, agp_unbind *unbind)
        struct agp_memory *mem = agp_find_memory(dev, unbind->key);
        return AGP_UNBIND_MEMORY(dev, mem);
agp_open(struct cdev *kdev, int oflags, int devtype, struct thread *td)
        device_t dev = KDEV2DEV(kdev);
        struct agp_softc *sc = device_get_softc(dev);
        if (!sc->as_isopen) {
agp_close(struct cdev *kdev, int fflag, int devtype, struct thread *td)
        device_t dev = KDEV2DEV(kdev);
        struct agp_softc *sc = device_get_softc(dev);
        struct agp_memory *mem;
         * Clear the GATT and force release on last close
        while ((mem = TAILQ_FIRST(&sc->as_memory)) != 0) {
                if (mem->am_is_bound)
                        AGP_UNBIND_MEMORY(dev, mem);
                AGP_FREE_MEMORY(dev, mem);
        if (sc->as_state == AGP_ACQUIRE_USER)
                agp_release_helper(dev, AGP_ACQUIRE_USER);
agp_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
        device_t dev = KDEV2DEV(kdev);
                return agp_info_user(dev, (agp_info *) data);
                return agp_acquire_helper(dev, AGP_ACQUIRE_USER);
                return agp_release_helper(dev, AGP_ACQUIRE_USER);
                return agp_setup_user(dev, (agp_setup *)data);
        case AGPIOC_ALLOCATE:
                return agp_allocate_user(dev, (agp_allocate *)data);
        case AGPIOC_DEALLOCATE:
                return agp_deallocate_user(dev, *(int *) data);
                return agp_bind_user(dev, (agp_bind *)data);
                return agp_unbind_user(dev, (agp_unbind *)data);
agp_mmap(struct cdev *kdev, vm_offset_t offset, vm_paddr_t *paddr, int prot)
        device_t dev = KDEV2DEV(kdev);
        struct agp_softc *sc = device_get_softc(dev);
        if (offset > AGP_GET_APERTURE(dev))
        *paddr = rman_get_start(sc->as_aperture) + offset;
/* Implementation of the kernel api */
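/*
 * A rough sketch of the in-kernel calling sequence (illustrative only;
 * the mode word and allocation size are made-up values, and the entry
 * points are the agpvar.h API implemented below):
 *
 *      device_t agpdev = agp_find_device();
 *      if (agpdev == NULL || agp_acquire(agpdev) != 0)
 *              return ENXIO;
 *      agp_enable(agpdev, mode);
 *      void *handle = agp_alloc_memory(agpdev, 0, 4 * 1024 * 1024);
 *      agp_bind_memory(agpdev, handle, 0);
 *      ...
 *      agp_unbind_memory(agpdev, handle);
 *      agp_free_memory(agpdev, handle);
 *      agp_release(agpdev);
 */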
        device_t *children, child;
        if (devclass_get_devices(agp_devclass, &children, &count) != 0)
        for (i = 0; i < count; i++) {
                if (device_is_attached(children[i])) {
        free(children, M_TEMP);
enum agp_acquire_state
agp_state(device_t dev)
        struct agp_softc *sc = device_get_softc(dev);
agp_get_info(device_t dev, struct agp_info *info)
        struct agp_softc *sc = device_get_softc(dev);
            pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
        info->ai_aperture_base = rman_get_start(sc->as_aperture);
        info->ai_aperture_size = rman_get_size(sc->as_aperture);
        info->ai_memory_allowed = sc->as_maxmem;
        info->ai_memory_used = sc->as_allocated;
agp_acquire(device_t dev)
        return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
agp_release(device_t dev)
        return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
agp_enable(device_t dev, u_int32_t mode)
        return AGP_ENABLE(dev, mode);
void *
agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
        return (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
void
agp_free_memory(device_t dev, void *handle)
        struct agp_memory *mem = (struct agp_memory *) handle;
        AGP_FREE_MEMORY(dev, mem);
int
agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
        struct agp_memory *mem = (struct agp_memory *) handle;
        return AGP_BIND_MEMORY(dev, mem, offset);
int
agp_unbind_memory(device_t dev, void *handle)
        struct agp_memory *mem = (struct agp_memory *) handle;
        return AGP_UNBIND_MEMORY(dev, mem);
void
agp_memory_info(device_t dev, void *handle, struct agp_memory_info *mi)
        struct agp_memory *mem = (struct agp_memory *) handle;
        mi->ami_size = mem->am_size;
        mi->ami_physical = mem->am_physical;
        mi->ami_offset = mem->am_offset;
        mi->ami_is_bound = mem->am_is_bound;