When the transfer is complete, continue the bus-master operation.
Certain hardware platforms may restrict DMA capabilities in a bus-specific way. Drivers should use ddi_slaveonly(9F) to determine if the device is in a slot in which DMA is possible. For an example, see "attach( )" on page 101.
uint_t dma_attr_version;/* version number of this structure */ uint64_t dma_attr_addr_lo;/* lower bound of bus address range */ uint64_t dma_attr_addr_hi;/* inclusive upper bound of range */ uint64_t dma_attr_count_max;/* max DMA transfer count - 1 */ uint64_t dma_attr_align;/* DMA address alignment */ uint_t dma_attr_burstsizes;/* DMA burstsize */ uint32_t dma_attr_minxfer;/* minimum DMA transfer size */ uint64_t dma_attr_maxxfer;/* max transfer size of a single I/O */ uint64_t dma_attr_seg;/* segment boundary restriction */ int dma_attr_sgllen;/* length of DMA scatter-gather list */ uint32_t dma_attr_granular;/* granularity of transfer count */ uint_t dma_attr_flags;/* additional DMA flags */
/*
 * DMA attributes for a device that can only address the range
 * 0xFF000000-0xFFFFFFFF, bursts in 1-, 2-, or 4-byte units, and
 * has no scatter/gather capability (single cookie per bind).
 */
static ddi_dma_attr_t attributes = {
	DMA_ATTR_V0,	/* version of this structure */
	0xFF000000,	/* lowest usable bus address */
	0xFFFFFFFF,	/* highest usable bus address (inclusive) */
	0xFFFFFFFF,	/* maximum value of the DMA counter register */
	1,		/* alignment: any byte boundary */
	0x7,		/* burst sizes: 0x1 | 0x2 | 0x4 */
	0x1,		/* minimum transfer size */
	0xFFFFFFFF,	/* maximum size of a single transfer */
	0xFFFFFFFF,	/* segment boundary (address register limit) */
	1,		/* one cookie only: no scatter/gather */
	512,		/* granularity: device operates on sectors */
	0,		/* attribute flags: must be 0 */
};
/*
 * DMA attributes for a device that addresses the full 32-bit range,
 * bursts in units of 2 through 256 bytes, honors a 24-bit segment
 * boundary, and supports a scatter/gather list of up to 17 entries.
 *
 * Fix: the dma_attr_sgllen entry (17) was previously annotated
 * "no scatter-gather", which contradicts its value -- a length
 * greater than 1 enables scatter/gather. Values are unchanged.
 */
static ddi_dma_attr_t attributes = {
	DMA_ATTR_V0,	/* version of this structure */
	0x00000000,	/* lowest usable bus address */
	0xFFFFFFFF,	/* highest usable bus address (inclusive) */
	0xFFFFFFFF,	/* maximum value of the DMA counter register */
	1,		/* alignment: any byte boundary */
	0x1FE,		/* burst sizes: 0x2|0x4|...|0x100 (2-256 bytes) */
	0x2,		/* minimum transfer size */
	0xFFFFFFFF,	/* maximum size of a single transfer */
	0xFFFFFF,	/* segment boundary (24-bit address register) */
	17,		/* scatter/gather list of up to 17 entries */
	512,		/* granularity: device operates on sectors */
	0,		/* attribute flags: must be 0 */
};
/*
 * DMA attributes for a device limited to 24-bit bus addresses
 * (0x0-0x00FFFFFF), a 16-bit transfer counter, a 15-bit segment
 * boundary, and a scatter/gather list of up to 17 entries.
 *
 * Fix: the dma_attr_sgllen entry (17) was previously annotated
 * "no scatter-gather", which contradicts its value -- a length
 * greater than 1 enables scatter/gather. Values are unchanged.
 */
static ddi_dma_attr_t attributes = {
	DMA_ATTR_V0,	/* version of this structure */
	0x00000000,	/* lowest usable bus address */
	0x00FFFFFF,	/* highest usable bus address (24-bit device) */
	0xFFFF,		/* maximum value of the DMA counter register */
	1,		/* alignment: any byte boundary */
	0x7,		/* burst sizes: 0x1 | 0x2 | 0x4 */
	0x1,		/* minimum transfer size */
	0xFFFFFFFF,	/* maximum size of a single transfer */
	0x00007FFF,	/* segment boundary (15-bit address register) */
	17,		/* scatter/gather list of up to 17 entries */
	512,		/* granularity: device operates on sectors */
	0,		/* attribute flags: must be 0 */
};
int ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr, int (*callback)(void *), void *arg, ddi_dma_handle_t *handlep);
waitfp | Indicated Action |
---|---|
DDI_DMA_DONTWAIT | Driver does not want to wait for resources to become available. |
DDI_DMA_SLEEP | Driver is willing to wait indefinitely for resources to become available. |
Other values | The address of a function to be called when resources are likely to be available. |
Type of Object | Resource Allocation Interface |
---|---|
Memory allocated within the driver using ddi_dma_mem_alloc(9F) | ddi_dma_addr_bind_handle(9F) |
Requests from the file system through strategy(9E) | ddi_dma_buf_bind_handle(9F) |
Memory in user space that has been locked down using physio(9F) | ddi_dma_buf_bind_handle(9F) |
int ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as, caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp); int ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp, uint_t flags, int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp);
struct buf *bp; /* current transfer */ ddi_dma_handle_t handle; struct xxiopb *iopb_array;/* for I/O Parameter Blocks */ ddi_dma_handle_t iopb_handle;
uint32_t dma_addr; /* starting address for DMA */ uint32_t dma_size; /* amount of data to transfer */
struct sglentry { uint32_t dma_addr; uint32_t dma_size; } sglist[SGLLEN]; caddr_t iopb_addr;/* When written informs device of the next */ /* command's parameter block address. */ /* When read after an interrupt,contains */ /* the address of the completed command. */
/*
 * xxstart() - begin the DMA transfer for the buffer queued on xsp.
 *
 * This routine doubles as the ddi_dma_buf_bind_handle() resource
 * callback: when the bind fails for lack of resources, xxstart is
 * registered and re-invoked later, which is why it returns the
 * DDI_DMA_CALLBACK_* codes.
 *
 * NOTE(review): "transfer is a read" and "program the DMA engine"
 * are pseudocode placeholders from the original text, and `cookie`
 * and `ccount` are assumed declared elsewhere -- this excerpt does
 * not compile as-is.
 */
static int xxstart(caddr_t arg)
{
	struct xxstate *xsp = (struct xxstate *)arg;
	struct device_reg *regp;
	int flags;

	/* Serialize: allow only one transfer at a time. */
	mutex_enter(&xsp->mu);
	if (xsp->busy) {
		/* transfer in progress */
		mutex_exit(&xsp->mu);
		return (0);
	}
	xsp->busy = 1;
	mutex_exit(&xsp->mu);

	regp = xsp->regp;
	/* Select the DMA direction flag for the pending request. */
	if (transfer is a read) {
		flags = DDI_DMA_READ;
	} else {
		flags = DDI_DMA_WRITE;
	}
	/*
	 * Bind the buffer to the DMA handle; on resource shortage
	 * the framework calls xxstart() again when resources are
	 * likely to be available.
	 */
	if (ddi_dma_buf_bind_handle(xsp->handle,xsp->bp,flags,
	    xxstart, (caddr_t)xsp, &cookie, &ccount) != DDI_DMA_MAPPED) {
		/* really should check all return values in a switch */
		return (DDI_DMA_CALLBACK_RUNOUT);
	}
	... program the DMA engine ...
	return (DDI_DMA_CALLBACK_DONE);
}
#define BEST_BURST_SIZE 0x20 /* 32 bytes */ if (ddi_dma_buf_bind_handle(xsp->handle,xsp->bp,flags, xxstart, (caddr_t)xsp, &cookie, &ccount) != DDI_DMA_MAPPED) { /* error handling */ return (0); } burst = ddi_dma_burstsizes(xsp->handle); /* check which bit is set and choose one burstsize to */ /* program the DMA engine */ if (burst & BEST_BURST_SIZE) { program DMA engine to use this burst size } else { other cases }
uint64_t dmac_laddress; /* unsigned 64-bit address */ uint32_t dmac_address; /* unsigned 32-bit address */ size_t dmac_size; /* transfer size */ u_int dmac_type; /* bus-specific type bits */
ddi_dma_cookie_t cookie; if (ddi_dma_buf_bind_handle(xsp->handle,xsp->bp,flags, xxstart, (caddr_t)xsp, &cookie, &xsp->ccount) != DDI_DMA_MAPPED) { /* error handling */ return (0); } sglp = regp->sglist; for (cnt = 1; cnt <= SGLLEN; cnt++, sglp++) { /* store the cookie parms into the S/G list */ ddi_put32(xsp->access_hdl, &sglp->dma_size, (uint32_t)cookie.dmac_size); ddi_put32(xsp->access_hdl, &sglp->dma_addr, cookie.dmac_address); /* Check for end of cookie list */ if (cnt == xsp->ccount) break; /* Get next DMA cookie */ (void) ddi_dma_nextcookie(xsp->handle, &cookie); } /* start DMA transfer */ ddi_put8(xsp->access_hdl, &regp->csr, ENABLE_INTERRUPTS | START_TRANSFER);
/*
 * xxintr() - interrupt handler: claim the interrupt, tear down the
 * completed DMA binding, and kick off the next queued transfer.
 *
 * NOTE(review): "... check for errors ..." is a pseudocode
 * placeholder, and the `}` before the final return suggests a
 * conditional (e.g. "if a transfer is pending") was lost in
 * extraction -- confirm against the original text.
 */
static u_int xxintr(caddr_t arg)
{
	struct xxstate *xsp = (struct xxstate *)arg;
	uint8_t status, temp;

	mutex_enter(&xsp->mu);
	/* read status */
	status = ddi_get8(xsp->access_hdl, &xsp->regp->csr);
	if (!(status & INTERRUPTING)) {
		/* not our device -- let other handlers run */
		mutex_exit(&xsp->mu);
		return (DDI_INTR_UNCLAIMED);
	}
	ddi_put8(xsp->access_hdl, &xsp->regp->csr, CLEAR_INTERRUPT);
	/* for store buffers */
	/* read back the csr to flush the write to the device */
	temp = ddi_get8(xsp->access_hdl, &xsp->regp->csr);
	/* release the DMA resources held by the completed transfer */
	ddi_dma_unbind_handle(xsp->handle);
	... check for errors ...
	xsp->busy = 0;
	mutex_exit(&xsp->mu);
	/* start the next transfer, if any */
	(void) xxstart((caddr_t)xsp);
	}
	return (DDI_INTR_CLAIMED);
}
void ddi_dma_free_handle(ddi_dma_handle_t *handlep);
int cancel_callbacks; /* detach(9E) sets this to */ /* prevent callbacks from */ /* rescheduling themselves */ int callback_count; /* number of outstanding */ /* callbacks */ kmutex_t callback_mutex; /* protects callback_count and */ /* cancel_callbacks. */ kcondvar_t callback_cv; /* condition is that */ /* callback_count is zero */ /* detach(9E) waits on it */
/*
 * xxdetach() - excerpt: drain outstanding DMA resource callbacks
 * before the driver instance is detached.
 *
 * Sets cancel_callbacks so that xxdmacallback() stops rescheduling
 * itself, then waits on callback_cv until callback_count drops to
 * zero.  The `...` markers elide the rest of detach processing.
 */
static int xxdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	...
	mutex_enter(&xsp->callback_mutex);
	/* tell pending callbacks not to reschedule themselves */
	xsp->cancel_callbacks = 1;
	/* wait for the last outstanding callback to signal completion */
	while (xsp->callback_count > 0) {
		cv_wait(&xsp->callback_cv, &xsp->callback_mutex);
	}
	mutex_exit(&xsp->callback_mutex);
	...
}
/*
 * xxstrategy() - excerpt: bind the incoming buffer for DMA and,
 * if resources are short, account for the callback the framework
 * will schedule (callback_count is read by xxdetach() to know when
 * it is safe to detach).
 */
static int xxstrategy(struct buf *bp)
{
	...
	mutex_enter(&xsp->callback_mutex);
	xsp->bp = bp;
	error = ddi_dma_buf_bind_handle(xsp->handle, xsp->bp, flags,
	    xxdmacallback, (caddr_t)xsp, &cookie, &ccount);
	/* a NORESOURCES return means xxdmacallback() is now scheduled */
	if (error == DDI_DMA_NORESOURCES)
		xsp->callback_count++;
	mutex_exit(&xsp->callback_mutex);
	...
}

/*
 * xxdmacallback() - DMA resource callback.  Retries the bind with
 * DDI_DMA_DONTWAIT; returns DDI_DMA_CALLBACK_RUNOUT to stay
 * scheduled only while resources remain unavailable.  Decrements
 * callback_count (and signals callback_cv) on every path that
 * returns DONE, so xxdetach() can drain callbacks safely.
 */
static int xxdmacallback(caddr_t callbackarg)
{
	struct xxstate *xsp = (struct xxstate *)callbackarg;
	...
	mutex_enter(&xsp->callback_mutex);
	if (xsp->cancel_callbacks) {
		/* do not reschedule, in process of detaching */
		xsp->callback_count--;
		if (xsp->callback_count == 0)
			cv_signal(&xsp->callback_cv);
		mutex_exit(&xsp->callback_mutex);
		return (DDI_DMA_CALLBACK_DONE);/* don't reschedule it */
	}
	/*
	 * Presumably at this point the device is still active
	 * and will not be detached until the DMA has completed.
	 * A return of 0 means try again later
	 */
	error = ddi_dma_buf_bind_handle(xsp->handle, xsp->bp,
	    flags, DDI_DMA_DONTWAIT, NULL, &cookie, &ccount);
	if (error == DDI_DMA_MAPPED) {
		/* resources acquired: start the transfer and retire */
		... program the DMA engine ...
		xsp->callback_count--;
		mutex_exit(&xsp->callback_mutex);
		return (DDI_DMA_CALLBACK_DONE);
	}
	if (error != DDI_DMA_NORESOURCES) {
		/* hard failure: give up rather than retry forever */
		xsp->callback_count--;
		mutex_exit(&xsp->callback_mutex);
		return (DDI_DMA_CALLBACK_DONE);
	}
	mutex_exit(&xsp->callback_mutex);
	/* still no resources: ask the framework to call us again */
	return (DDI_DMA_CALLBACK_RUNOUT);
}
int ddi_dma_sync(ddi_dma_handle_t handle, off_t off, size_t length, u_int type);
if (ddi_dma_sync(xsp->handle, 0, length, DDI_DMA_SYNC_FORCPU) == DDI_SUCCESS) { /* the CPU can now access the transferred data */ ... } else { error handling }
int partial; /* DMA object partially mapped, use windows */ int nwin; /* number of DMA windows for this object */ int windex; /* index of the current active window */
/*
 * xxstart() - begin a (possibly partial) DMA transfer using
 * DDI_DMA_PARTIAL.  When the object cannot be mapped in one piece,
 * the bind returns DDI_DMA_PARTIAL_MAP and the driver records the
 * number of DMA windows; xxintr() then walks the windows with
 * ddi_dma_getwin().
 *
 * NOTE(review): the opening `if (transfer is a read) {` appears to
 * have been lost in extraction before the DDI_DMA_READ assignment,
 * and "..." elides DMA-engine programming -- confirm against the
 * original text.
 */
static int xxstart (caddr_t arg)
{
	struct xxstate *xsp = (struct xxstate *)arg;
	struct device_reg *regp = xsp->reg;
	ddi_dma_cookie_t cookie;
	int status;

	/* Serialize: allow only one transfer at a time. */
	mutex_enter(&xsp->mu);
	if (xsp->busy) {
		/* transfer in progress */
		mutex_exit(&xsp->mu);
		return (0);
	}
	xsp->busy = 1;
	mutex_exit(&xsp->mu);
	flags = DDI_DMA_READ;
	} else {
	flags = DDI_DMA_WRITE;
	}
	/* allow the system to map only part of the object */
	flags |= DDI_DMA_PARTIAL;
	status = ddi_dma_buf_bind_handle(xsp->handle, xsp->bp,
	    flags, xxstart, (caddr_t)xsp, &cookie, &ccount);
	if (status != DDI_DMA_MAPPED &&
	    status != DDI_DMA_PARTIAL_MAP)
		return (0);
	if (status == DDI_DMA_PARTIAL_MAP) {
		/* object only partially mapped: set up window walk */
		ddi_dma_numwin(xsp->handle, &xsp->nwin);
		xsp->partial = 1;
		xsp->windex = 0;
	} else {
		xsp->partial = 0;
	}
	... return (1);
}
/*
 * xxintr() - interrupt handler for the DMA-window variant: claim
 * the interrupt, update residual/error state, and either advance
 * to the next DMA window with ddi_dma_getwin() or finish the
 * request (unbind, biodone) and start the next transfer.
 *
 * NOTE(review): "an error occurred during transfer" and "amount
 * transferred" are pseudocode placeholders; `offset`, `len`,
 * `cookie`, and `ccount` are assumed declared elsewhere; and the
 * orphan `}` before the final return suggests a conditional around
 * the xxstart() call was lost in extraction.
 */
static u_int xxintr(caddr_t arg)
{
	struct xxstate *xsp = (struct xxstate *)arg;
	uint8_t status, temp;

	mutex_enter(&xsp->mu);
	/* read status */
	status = ddi_get8(xsp->access_hdl, &xsp->regp->csr);
	if (!(status & INTERRUPTING)) {
		/* not our device -- let other handlers run */
		mutex_exit(&xsp->mu);
		return (DDI_INTR_UNCLAIMED);
	}
	ddi_put8(xsp->access_hdl,&xsp->regp->csr, CLEAR_INTERRUPT);
	/* for store buffers */
	/* read back the csr to flush the write to the device */
	temp = ddi_get8(xsp->access_hdl, &xsp->regp->csr);
	if (an error occurred during transfer) {
		bioerror(xsp->bp, EIO);
		/* abandon any remaining windows on error */
		xsp->partial = 0;
	} else {
		xsp->bp->b_resid -= amount transferred;
	}
	if (xsp->partial && (++xsp->windex < xsp->nwin)) {
		/* device still marked busy to protect state */
		mutex_exit(&xsp->mu);
		/* move the DMA resources to the next window */
		(void) ddi_dma_getwin(xsp->handle, xsp->windex,
		    &offset, &len, &cookie, &ccount);
		...
		return (DDI_INTR_CLAIMED);
	}
	/* last (or only) window done: release resources and finish */
	ddi_dma_unbind_handle(xsp->handle);
	biodone(xsp->bp);
	xsp->busy = 0;
	xsp->partial = 0;
	mutex_exit(&xsp->mu);
	/* start the next transfer, if any */
	(void) xxstart((caddr_t)xsp);
	}
	return (DDI_INTR_CLAIMED);
}
int ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length, ddi_device_acc_attr_t *accattrp, uint_t xfermodes, int (*callback)(void *), void *arg, caddr_t *kaddrp, size_t *real_length, ddi_acc_handle_t *handlep);
if (ddi_dma_mem_alloc(xsp->iopb_handle, size, &accattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &xsp->iopb_array, &real_length, &xsp->acchandle) != DDI_SUCCESS) { error handling goto failure; } if (ddi_dma_addr_bind_handle(xsp->iopb_handle, NULL, xsp->iopb_array, real_length, DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &cookie, &count) != DDI_DMA_MAPPED) { error handling ddi_dma_mem_free(&xsp->acchandle); goto failure; }
Copyright 1997 Sun Microsystems, Inc. All rights reserved.
Comments on: Writing Device Drivers