#include <dev/drm2/drmP.h>

#include <dev/agp/agpreg.h>
#include <dev/pci/pcireg.h>

devclass_t drm_devclass;

MALLOC_DEFINE(DRM_MEM_DMA, "drm_dma", "DRM DMA Data Structures");
MALLOC_DEFINE(DRM_MEM_SAREA, "drm_sarea", "DRM SAREA Data Structures");
MALLOC_DEFINE(DRM_MEM_DRIVER, "drm_driver", "DRM DRIVER Data Structures");
MALLOC_DEFINE(DRM_MEM_MAGIC, "drm_magic", "DRM MAGIC Data Structures");
MALLOC_DEFINE(DRM_MEM_MINOR, "drm_minor", "DRM MINOR Data Structures");
MALLOC_DEFINE(DRM_MEM_IOCTLS, "drm_ioctls", "DRM IOCTL Data Structures");
MALLOC_DEFINE(DRM_MEM_MAPS, "drm_maps", "DRM MAP Data Structures");
MALLOC_DEFINE(DRM_MEM_BUFS, "drm_bufs", "DRM BUFFER Data Structures");
MALLOC_DEFINE(DRM_MEM_SEGS, "drm_segs", "DRM SEGMENTS Data Structures");
MALLOC_DEFINE(DRM_MEM_PAGES, "drm_pages", "DRM PAGES Data Structures");
MALLOC_DEFINE(DRM_MEM_FILES, "drm_files", "DRM FILE Data Structures");
MALLOC_DEFINE(DRM_MEM_QUEUES, "drm_queues", "DRM QUEUE Data Structures");
MALLOC_DEFINE(DRM_MEM_CMDS, "drm_cmds", "DRM COMMAND Data Structures");
MALLOC_DEFINE(DRM_MEM_MAPPINGS, "drm_mapping", "DRM MAPPING Data Structures");
MALLOC_DEFINE(DRM_MEM_BUFLISTS, "drm_buflists", "DRM BUFLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_CTXBITMAP, "drm_ctxbitmap",
    "DRM CTXBITMAP Data Structures");
MALLOC_DEFINE(DRM_MEM_SGLISTS, "drm_sglists", "DRM SGLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_MM, "drm_sman", "DRM MEMORY MANAGER Data Structures");
MALLOC_DEFINE(DRM_MEM_HASHTAB, "drm_hashtab", "DRM HASHTABLE Data Structures");
MALLOC_DEFINE(DRM_MEM_KMS, "drm_kms", "DRM KMS Data Structures");
MALLOC_DEFINE(DRM_MEM_VBLANK, "drm_vblank", "DRM VBLANK Handling Data");

const char *fb_mode_option = NULL;

#define NSEC_PER_USEC	1000L
#define NSEC_PER_SEC	1000000000L

int64_t
timeval_to_ns(const struct timeval *tv)
{
        return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
                tv->tv_usec * NSEC_PER_USEC;
}

struct timeval
ns_to_timeval(const int64_t nsec)
{
        struct timeval tv;
        long rem;

        if (nsec == 0) {
                tv.tv_sec = 0;
                tv.tv_usec = 0;
                return (tv);
        }

        tv.tv_sec = nsec / NSEC_PER_SEC;
        rem = nsec % NSEC_PER_SEC;
        /* Normalize a negative remainder so tv_usec is always >= 0. */
        if (rem < 0) {
                tv.tv_sec--;
                rem += NSEC_PER_SEC;
        }
        tv.tv_usec = rem / 1000;

        return (tv);
}

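/*
 * Example (illustrative, not from the original source):
 * ns_to_timeval(1500000000) yields { .tv_sec = 1, .tv_usec = 500000 },
 * and a negative input such as -500000000 normalizes to
 * { .tv_sec = -1, .tv_usec = 500000 }, so the value round-trips
 * through timeval_to_ns(): -1 * NSEC_PER_SEC + 500000 * NSEC_PER_USEC
 * == -500000000.
 */
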
/* Copied from OFED. */
unsigned long drm_linux_timer_hz_mask;

static void
drm_linux_timer_init(void *arg)
{

        /*
         * Compute an internal HZ value which can divide 2**32 to
         * avoid timer rounding problems when the tick value wraps
         * around 2**32.
         */
        drm_linux_timer_hz_mask = 1;
        while (drm_linux_timer_hz_mask < (unsigned long)hz)
                drm_linux_timer_hz_mask *= 2;
        drm_linux_timer_hz_mask--;
}
SYSINIT(drm_linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, drm_linux_timer_init, NULL);

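/*
 * Worked example (not from the original source): with the default
 * hz = 1000, the loop above settles on drm_linux_timer_hz_mask = 1023,
 * the smallest power-of-two bound minus one that covers hz, so tick
 * arithmetic can use a cheap bitwise mask instead of a modulo.
 */
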
static const drm_pci_id_list_t *
drm_find_description(int vendor, int device, const drm_pci_id_list_t *idlist)
{
        int i;

        for (i = 0; idlist[i].vendor != 0; i++) {
                if ((idlist[i].vendor == vendor) &&
                    ((idlist[i].device == device) ||
                    (idlist[i].device == 0))) {
                        return (&idlist[i]);
                }
        }

        return (NULL);
}

/*
 * drm_probe_helper: called by a driver at the end of its probe
 * method.
 */
int
drm_probe_helper(device_t kdev, const drm_pci_id_list_t *idlist)
{
        const drm_pci_id_list_t *id_entry;
        int vendor, device;

        vendor = pci_get_vendor(kdev);
        device = pci_get_device(kdev);

        if (pci_get_class(kdev) != PCIC_DISPLAY ||
            (pci_get_subclass(kdev) != PCIS_DISPLAY_VGA &&
            pci_get_subclass(kdev) != PCIS_DISPLAY_OTHER))
                return (ENXIO);

        id_entry = drm_find_description(vendor, device, idlist);
        if (id_entry != NULL) {
                if (device_get_desc(kdev) == NULL) {
                        DRM_DEBUG("%s desc: %s\n",
                            device_get_nameunit(kdev), id_entry->name);
                        device_set_desc(kdev, id_entry->name);
                }
                return (0);
        }

        return (ENXIO);
}

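/*
 * Usage sketch (hypothetical driver name): a driver's probe method is
 * expected to reduce to a call to the helper above, e.g.
 *
 *	static int
 *	foodrm_probe(device_t kdev)
 *	{
 *		return (drm_probe_helper(kdev, foodrm_pciidlist));
 *	}
 */
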
/*
 * drm_attach_helper: called by a driver at the end of its attach
 * method.
 */
int
drm_attach_helper(device_t kdev, const drm_pci_id_list_t *idlist,
    struct drm_driver *driver)
{
        struct drm_device *dev;
        int vendor, device;
        int ret;

        dev = device_get_softc(kdev);

        vendor = pci_get_vendor(kdev);
        device = pci_get_device(kdev);
        dev->id_entry = drm_find_description(vendor, device, idlist);

        ret = drm_get_pci_dev(kdev, dev, driver);

        return (ret);
}

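/*
 * Usage sketch (hypothetical names, matching the probe example above):
 * the attach method ends by handing its id list and struct drm_driver
 * to the helper, e.g.
 *
 *	static int
 *	foodrm_attach(device_t kdev)
 *	{
 *		return (drm_attach_helper(kdev, foodrm_pciidlist,
 *		    &foodrm_driver));
 *	}
 */
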
int
drm_generic_suspend(device_t kdev)
{
        struct drm_device *dev;
        int error;

        DRM_DEBUG_KMS("Starting suspend\n");

        dev = device_get_softc(kdev);
        if (dev->driver->suspend) {
                pm_message_t state;

                state.event = PM_EVENT_SUSPEND;
                error = -dev->driver->suspend(dev, state);
                if (error)
                        goto out;
        }

        error = bus_generic_suspend(kdev);

out:
        DRM_DEBUG_KMS("Finished suspend: %d\n", error);

        return (error);
}

int
drm_generic_resume(device_t kdev)
{
        struct drm_device *dev;
        int error;

        DRM_DEBUG_KMS("Starting resume\n");

        dev = device_get_softc(kdev);
        if (dev->driver->resume) {
                error = -dev->driver->resume(dev);
                if (error)
                        goto out;
        }

        error = bus_generic_resume(kdev);

out:
        DRM_DEBUG_KMS("Finished resume: %d\n", error);

        return (error);
}

int
drm_generic_detach(device_t kdev)
{
        struct drm_device *dev;
        int i;

        dev = device_get_softc(kdev);

        drm_put_dev(dev);

        /* Clean up PCI resources allocated by drm_bufs.c.  We're not really
         * worried about resource consumption while the DRM is inactive (between
         * lastclose and firstopen or unload) because these aren't actually
         * taking up KVA, just keeping the PCI resource allocated.
         */
        for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
                if (dev->pcir[i] == NULL)
                        continue;
                bus_release_resource(dev->dev, SYS_RES_MEMORY,
                    dev->pcirid[i], dev->pcir[i]);
                dev->pcir[i] = NULL;
        }

        if (pci_disable_busmaster(dev->dev))
                DRM_ERROR("Request to disable bus-master failed.\n");

        return (0);
}

int
drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *top)
{
        struct sysctl_oid *oid;

        snprintf(dev->busid_str, sizeof(dev->busid_str),
            "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
            dev->pci_slot, dev->pci_func);
        oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
            CTLFLAG_RD, dev->busid_str, 0, NULL);
        if (oid == NULL)
                return (-ENOMEM);
        dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
        oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
            "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
        if (oid == NULL)
                return (-ENOMEM);

        return (0);
}

static int
drm_device_find_capability(struct drm_device *dev, int cap)
{

        /* pci_find_cap() returns 0 iff the capability is present. */
        return (pci_find_cap(dev->dev, cap, NULL) == 0);
}

int
drm_pci_device_is_agp(struct drm_device *dev)
{
        if (dev->driver->device_is_agp != NULL) {
                int ret;

                /* device_is_agp returns a tristate, 0 = not AGP, 1 = definitely
                 * AGP, 2 = fall back to PCI capability
                 */
                ret = (*dev->driver->device_is_agp)(dev);
                if (ret != DRM_MIGHT_BE_AGP)
                        return (ret);
        }

        return (drm_device_find_capability(dev, PCIY_AGP));
}

int
drm_pci_device_is_pcie(struct drm_device *dev)
{

        return (drm_device_find_capability(dev, PCIY_EXPRESS));
}

static bool
dmi_found(const struct dmi_system_id *dsi)
{
        char *hw_vendor, *hw_prod;
        int i, slot;
        bool res;

        hw_vendor = kern_getenv("smbios.planar.maker");
        hw_prod = kern_getenv("smbios.planar.product");
        res = true;
        for (i = 0; i < nitems(dsi->matches); i++) {
                slot = dsi->matches[i].slot;
                switch (slot) {
                case DMI_NONE:
                        break;
                case DMI_SYS_VENDOR:
                case DMI_BOARD_VENDOR:
                        if (hw_vendor != NULL &&
                            !strcmp(hw_vendor, dsi->matches[i].substr)) {
                                break;
                        } else {
                                res = false;
                                goto out;
                        }
                case DMI_PRODUCT_NAME:
                case DMI_BOARD_NAME:
                        if (hw_prod != NULL &&
                            !strcmp(hw_prod, dsi->matches[i].substr)) {
                                break;
                        } else {
                                res = false;
                                goto out;
                        }
                default:
                        res = false;
                        goto out;
                }
        }
out:
        freeenv(hw_vendor);
        freeenv(hw_prod);

        return (res);
}

bool
dmi_check_system(const struct dmi_system_id *sysid)
{
        const struct dmi_system_id *dsi;
        bool res;

        for (res = false, dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
                if (dmi_found(dsi)) {
                        res = true;
                        if (dsi->callback != NULL && dsi->callback(dsi))
                                break;
                }
        }

        return (res);
}

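/*
 * Usage sketch (hypothetical table and callback, assuming the
 * DMI_MATCH() initializer macro from the headers): callers pass a
 * zero-terminated array of matches, e.g.
 *
 *	static const struct dmi_system_id foodrm_quirks[] = {
 *		{
 *			.ident = "Some Board",
 *			.matches = {
 *				DMI_MATCH(DMI_BOARD_VENDOR, "Vendor"),
 *				DMI_MATCH(DMI_BOARD_NAME, "Board"),
 *			},
 *		},
 *		{ }	// terminator: matches[0].slot == 0
 *	};
 *
 *	if (dmi_check_system(foodrm_quirks))
 *		apply_quirk();
 */
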
int
drm_mtrr_add(unsigned long offset, unsigned long size, unsigned int flags)
{
        int act;
        struct mem_range_desc mrdesc;

        mrdesc.mr_base = offset;
        mrdesc.mr_len = size;
        mrdesc.mr_flags = flags;
        act = MEMRANGE_SET_UPDATE;
        strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));

        return (-mem_range_attr_set(&mrdesc, &act));
}

int
drm_mtrr_del(int handle __unused, unsigned long offset, unsigned long size,
    unsigned int flags)
{
        int act;
        struct mem_range_desc mrdesc;

        mrdesc.mr_base = offset;
        mrdesc.mr_len = size;
        mrdesc.mr_flags = flags;
        act = MEMRANGE_SET_REMOVE;
        strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));

        return (-mem_range_attr_set(&mrdesc, &act));
}

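/*
 * Typical use (assuming the DRM_MTRR_WC flag from the headers) is
 * marking a framebuffer aperture write-combining, e.g.
 * drm_mtrr_add(fb_base, fb_size, DRM_MTRR_WC).  Both functions negate
 * the mem_range_attr_set() status so callers see a Linux-style
 * negative errno on failure.
 */
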
void
drm_clflush_pages(vm_page_t *pages, unsigned long num_pages)
{

#if defined(__i386__) || defined(__amd64__)
        pmap_invalidate_cache_pages(pages, num_pages);
#else
        DRM_ERROR("drm_clflush_pages not implemented on this architecture");
#endif
}

void
drm_clflush_virt_range(char *addr, unsigned long length)
{

#if defined(__i386__) || defined(__amd64__)
        pmap_invalidate_cache_range((vm_offset_t)addr,
            (vm_offset_t)addr + length, TRUE);
#else
        DRM_ERROR("drm_clflush_virt_range not implemented on this architecture");
#endif
}

void
hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
    char *linebuf, size_t linebuflen, bool ascii __unused)
{
        int i, j, c;

        i = j = 0;

        while (i < len && j <= linebuflen) {
                c = ((const char *)buf)[i];

                if (i != 0) {
                        if (i % rowsize == 0) {
                                /* Newline required. */
                                sprintf(linebuf + j, "\n");
                                j++;
                        } else if (i % groupsize == 0) {
                                /* Space required. */
                                sprintf(linebuf + j, " ");
                                j++;
                        }
                }

                /* Stop while there is still room for a hex pair and NUL. */
                if (j > linebuflen - 4)
                        break;

                sprintf(linebuf + j, "%02X", c);
                j += 2;

                i++;
        }

        if (j > 0)
                sprintf(linebuf + j, "\n");
}

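/*
 * Example (illustrative): dumping the four bytes "ABCD" with rowsize 8
 * and groupsize 2 produces "4142 4344\n" in linebuf: a space before
 * each two-byte group after the first, a newline at every rowsize
 * boundary, and an early stop once fewer than four characters of
 * linebuf remain.
 */
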
#ifdef DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

#define LINUX_IOCTL_DRM_MIN	0x6400
#define LINUX_IOCTL_DRM_MAX	0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN	IOC_OUT
#define LINUX_IOC_OUT	IOC_IN

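/*
 * FreeBSD encodes the transfer direction in the top bits of the ioctl
 * command as IOC_IN (0x80000000, userland-to-kernel) and IOC_OUT
 * (0x40000000); Linux uses the same bit positions with the opposite
 * sense, so translating a command only requires relabeling those two
 * bits, which drm_linux_ioctl() below does before forwarding.
 */
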
static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args *args)
{
        int error;
        int cmd = args->cmd;

        args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
        if (cmd & LINUX_IOC_IN)
                args->cmd |= IOC_IN;
        if (cmd & LINUX_IOC_OUT)
                args->cmd |= IOC_OUT;

        error = ioctl(p, (struct ioctl_args *)args);

        return (error);
}
#endif /* DRM_LINUX */

static int
drm_modevent(module_t mod, int type, void *data)
{

        switch (type) {
        case MOD_LOAD:
                TUNABLE_INT_FETCH("drm.debug", &drm_debug);
                TUNABLE_INT_FETCH("drm.notyet", &drm_notyet);
                break;
        }

        return (0);
}

static moduledata_t drm_mod = {
        "drmn",
        drm_modevent,
        0
};

DECLARE_MODULE(drmn, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(drmn, 1);
MODULE_DEPEND(drmn, agp, 1, 1, 1);
MODULE_DEPEND(drmn, pci, 1, 1, 1);
MODULE_DEPEND(drmn, mem, 1, 1, 1);
MODULE_DEPEND(drmn, linuxkpi, 1, 1, 1);
MODULE_DEPEND(drmn, iicbus, 1, 1, 1);