Source code from http://lxr.free-electrons.com/source/drivers/dma/intel_mid_dma.c#L37
Intel Langwell DMA Driver
static int get_ch_index(int *status, unsigned int base)
{
    int i;
    for (i = 0; i < MAX_CHAN; i++) {
        if (*status & (1 << (i + base))) {
            *status = *status & ~(1 << (i + base));
            pr_debug("MDMA: index %d New status %x\n",
                    i, *status);
            return i;
        }
    }
    return -1;
}

get_ch_index - convert status to channel
status: status mask, base: dma ch base value
Modify the status mask and return the channel index needing attention (or -1 if none)
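Each call clears exactly one pending bit, so callers are expected to loop until -1 comes back. Below is a minimal stand-alone rendition of that calling pattern, assuming MAX_CHAN is 4 and substituting printf() for pr_debug():

#include <stdio.h>

#define MAX_CHAN 4    /* assumed channel count for this sketch */

/* Same bit-scan logic as the driver's helper, minus pr_debug(). */
static int get_ch_index(int *status, unsigned int base)
{
    int i;
    for (i = 0; i < MAX_CHAN; i++) {
        if (*status & (1 << (i + base))) {
            *status &= ~(1 << (i + base));
            return i;
        }
    }
    return -1;
}

int main(void)
{
    int status = 0x0A;    /* pretend channels 1 and 3 are pending */
    int ch;

    /* Each call clears one bit; loop until nothing is pending. */
    while ((ch = get_ch_index(&status, 0)) != -1)
        printf("servicing channel %d, remaining status %#x\n",
                ch, status);
    return 0;
}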
static int get_block_ts(int len, int tx_width, int block_size)
{
    int byte_width = 0, block_ts = 0;

    switch (tx_width) {
    case DMA_SLAVE_BUSWIDTH_1_BYTE:
        byte_width = 1;
        break;
    case DMA_SLAVE_BUSWIDTH_2_BYTES:
        byte_width = 2;
        break;
    case DMA_SLAVE_BUSWIDTH_4_BYTES:
    default:
        byte_width = 4;
        break;
    }

    block_ts = len/byte_width;
    if (block_ts > block_size)
        block_ts = 0xFFFF;
    return block_ts;
}

get_block_ts - calculates dma transaction length
len: dma transfer length, tx_width: dma transfer src width, block_size: dma controller max block size
Based on the src width, calculate the DMA transaction length in data items. Return the data item count, or 0xFFFF if it exceeds the max length for a block.
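To make the arithmetic concrete, here is a stand-alone rendition with the bus-width constants stubbed to the values linux/dmaengine.h defines: a 4096-byte transfer at 4-byte width is 1024 data items and fits a 2047-item block limit, while the same transfer at 1-byte width overflows it and yields the 0xFFFF sentinel.

#include <stdio.h>

/* Stand-ins for the enum values from linux/dmaengine.h. */
#define DMA_SLAVE_BUSWIDTH_1_BYTE  1
#define DMA_SLAVE_BUSWIDTH_2_BYTES 2
#define DMA_SLAVE_BUSWIDTH_4_BYTES 4

static int get_block_ts(int len, int tx_width, int block_size)
{
    int byte_width, block_ts;

    switch (tx_width) {
    case DMA_SLAVE_BUSWIDTH_1_BYTE:
        byte_width = 1;
        break;
    case DMA_SLAVE_BUSWIDTH_2_BYTES:
        byte_width = 2;
        break;
    case DMA_SLAVE_BUSWIDTH_4_BYTES:
    default:
        byte_width = 4;
        break;
    }

    block_ts = len / byte_width;    /* bytes -> data items */
    if (block_ts > block_size)
        block_ts = 0xFFFF;          /* sentinel: exceeds max block */
    return block_ts;
}

int main(void)
{
    /* 4096 bytes at 4-byte width -> 1024 items, within a 2047 limit */
    printf("%d\n", get_block_ts(4096, DMA_SLAVE_BUSWIDTH_4_BYTES, 2047));
    /* 4096 bytes at 1-byte width -> 4096 items, over the limit */
    printf("%#x\n", get_block_ts(4096, DMA_SLAVE_BUSWIDTH_1_BYTE, 2047));
    return 0;
}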
static void dmac1_mask_periphral_intr(struct middma_device *mid)
{
    u32 pimr;

    if (mid->pimr_mask) {
        pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
        pimr |= mid->pimr_mask;
        writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
    }
    return;
}

dmac1_mask_periphral_intr - mask the peripheral interrupt
mid: dma device for which masking is required
static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
{
    u32 pimr;
    struct middma_device *mid = to_middma_device(midc->chan.device);

    if (mid->pimr_mask) {
        pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
        pimr &= ~mid->pimr_mask;
        writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
    }
    return;
}

dmac1_unmask_periphral_intr - unmask the peripheral interrupt
midc: dma channel for which unmasking is required
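Both helpers are the same read-modify-write pattern on a shared mask register: OR the bits in to silence the peripheral, AND them back out to re-enable it. A minimal sketch of the pattern, with a plain variable standing in for the readl()/writel() access to LNW_PERIPHRAL_MASK and an arbitrary example mask value:

#include <stdio.h>
#include <stdint.h>

static uint32_t periphral_mask_reg;    /* stands in for LNW_PERIPHRAL_MASK */

static void mask_intr(uint32_t pimr_mask)
{
    uint32_t pimr = periphral_mask_reg;    /* readl() */
    pimr |= pimr_mask;                     /* set bits = masked */
    periphral_mask_reg = pimr;             /* writel() */
}

static void unmask_intr(uint32_t pimr_mask)
{
    uint32_t pimr = periphral_mask_reg;
    pimr &= ~pimr_mask;                    /* clear bits = unmasked */
    periphral_mask_reg = pimr;
}

int main(void)
{
    mask_intr(0x0808);    /* silence two example interrupt lines */
    printf("after mask:   %#06x\n", periphral_mask_reg);
    unmask_intr(0x0008);  /* re-enable one of them */
    printf("after unmask: %#06x\n", periphral_mask_reg);
    return 0;
}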
static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
    dmac1_unmask_periphral_intr(midc);

    /* en ch interrupts */
    iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
    iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
    return;
}

enable_dma_interrupt - enable the peripheral interrupt
midc: dma channel for which interrupts are to be enabled
static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
    iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
    iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
    iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
    return;
}

disable_dma_interrupt - disable the peripheral interrupt
midc: dma channel for which interrupts are to be disabled
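The MASK_INTR_REG()/UNMASK_INTR_REG() macros are defined in intel_mid_dma_regs.h, which these slides do not show. On DesignWare-style DMA controllers the mask registers typically pair each channel bit with a write-enable bit in the upper half, so a write only takes effect for channels whose write-enable bit is also set. A plausible reconstruction, offered purely as an assumption consistent with the INT_MASK_WE shifts in the interrupt handler later:

#include <stdio.h>

#define INT_MASK_WE 8    /* assumed write-enable offset */

/* Hypothetical expansions; the real macros live in intel_mid_dma_regs.h. */
#define MASK_INTR_REG(ch)   (1 << ((ch) + INT_MASK_WE))
#define UNMASK_INTR_REG(ch) ((1 << (ch)) | (1 << ((ch) + INT_MASK_WE)))

int main(void)
{
    /* Channel 2: mask = WE bit with value 0, unmask = WE bit + value bit. */
    printf("mask ch2:   %#06x\n", MASK_INTR_REG(2));      /* 0x0400 */
    printf("unmask ch2: %#06x\n", UNMASK_INTR_REG(2));    /* 0x0404 */
    return 0;
}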
static void midc_dostart(struct intel_mid_dma_chan *midc,
            struct intel_mid_dma_desc *first)
{
    struct middma_device *mid = to_middma_device(midc->chan.device);

    /* channel is idle */
    if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
        /* error */
        pr_err("ERR_MDMA: channel is busy in start\n");
        /* The tasklet will hopefully advance the queue... */
        return;
    }
    midc->busy = true;
    /* write registers and en */
    iowrite32(first->sar, midc->ch_regs + SAR);
    iowrite32(first->dar, midc->ch_regs + DAR);
    iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
    iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
    iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
    iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
    pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
        (int)first->sar, (int)first->dar, first->cfg_hi,
        first->cfg_lo, first->ctl_hi, first->ctl_lo);
    first->status = DMA_IN_PROGRESS;

    iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
}

midc_dostart - begin a DMA transaction
midc: channel for which the transaction is to be started
first: first descriptor for the DMA channel
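test_ch_en() is another helper from intel_mid_dma_regs.h that the slides do not show; the guard above only refuses to start if the channel both looks busy in software and still reads back as enabled in hardware. A hypothetical stand-alone sketch of what such a check amounts to, with the register value passed in directly instead of read via ioread32():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical: a channel counts as running while its DMA_CHAN_EN bit is set. */
static bool test_ch_en(uint32_t chan_en_reg, int ch_id)
{
    return (chan_en_reg & (1u << ch_id)) != 0;
}

int main(void)
{
    uint32_t chan_en = 0x5;    /* channels 0 and 2 currently enabled */

    printf("ch1 busy? %d\n", test_ch_en(chan_en, 1));    /* 0 */
    printf("ch2 busy? %d\n", test_ch_en(chan_en, 2));    /* 1 */
    return 0;
}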
static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
                        dma_cookie_t cookie,
                        struct dma_tx_state *txstate)
{
    struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
    enum dma_status ret;

    ret = dma_cookie_status(chan, cookie, txstate);
    if (ret != DMA_COMPLETE) {
        spin_lock_bh(&midc->lock);
        midc_scan_descriptors(to_middma_device(chan->device), midc);
        spin_unlock_bh(&midc->lock);

        ret = dma_cookie_status(chan, cookie, txstate);
    }

    return ret;
}

intel_mid_dma_tx_status - Return status of a transaction
chan: channel where status needs to be checked
cookie: cookie for the txn, txstate: DMA txn state
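Clients do not call this hook directly; they reach it through the generic dmaengine API. A hedged sketch of the consumer side, using only standard dmaengine calls and assuming `tx` came from a prep routine such as the memcpy prep shown later:

#include <linux/dmaengine.h>
#include <linux/errno.h>

/* Sketch: submit a prepared descriptor and poll it to completion. */
static int submit_and_poll(struct dma_chan *chan,
            struct dma_async_tx_descriptor *tx)
{
    dma_cookie_t cookie;
    enum dma_status status;

    cookie = dmaengine_submit(tx);
    if (dma_submit_error(cookie))
        return -EINVAL;

    dma_async_issue_pending(chan);

    /* Each poll ends up in the driver's tx_status hook above. */
    do {
        status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
    } while (status == DMA_IN_PROGRESS);

    return status == DMA_COMPLETE ? 0 : -EIO;
}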
static int intel_mid_dma_device_control(struct dma_chan *chan,
            enum dma_ctrl_cmd cmd, unsigned long arg)
{
    ...
    if (cmd != DMA_TERMINATE_ALL)
        return -ENXIO;

    /* Suspend and disable the channel */
    cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
    cfg_lo.cfgx.ch_susp = 1;
    iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
    iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
    midc->busy = false;
    /* Disable interrupts */
    disable_dma_interrupt(midc);
    midc->descs_allocated = 0;
    ...
    return 0;
}

intel_mid_dma_device_control - Perform a DMA control command
chan: channel for DMA control
cmd: control command, arg: command argument value
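On the kernel generation this driver belongs to, the usual consumer entry point for this hook is dmaengine_terminate_all(), which funnels into device_control with DMA_TERMINATE_ALL. A minimal sketch of the caller side, assuming that era's API:

#include <linux/dmaengine.h>

/* Sketch: abort everything queued on a channel, e.g. on device close. */
static int client_abort_dma(struct dma_chan *chan)
{
    /*
     * Reaches the device_control hook with cmd == DMA_TERMINATE_ALL,
     * which suspends and disables the channel as shown above.
     */
    return dmaengine_terminate_all(chan);
}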
static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
        struct dma_chan *chan, dma_addr_t dest,
        dma_addr_t src, size_t len, unsigned long flags)
{
    ...
    pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
                midc->dma->pci_id, midc->ch_id, len);

    /* calculate CFG_LO */
    if (mids->hs_mode == LNW_DMA_SW_HS) {
        cfg_lo.cfg_lo = 0;
        cfg_lo.cfgx.hs_sel_dst = 1;
        cfg_lo.cfgx.hs_sel_src = 1;
    } else if (mids->hs_mode == LNW_DMA_HW_HS)
        cfg_lo.cfg_lo = 0x00000;

    /* calculate CFG_HI */
    if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM)
        cfg_hi.cfg_hi = 0;
    else {
        cfg_hi.cfg_hi = 0;
        if (midc->dma->pimr_mask) {
            cfg_hi.cfgx.protctl = 0x0; /* default value */
            cfg_hi.cfgx.fifo_mode = 1;
            ...
        } else {
            cfg_hi.cfgx.protctl = 0x1; /* default value */
            cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
                    midc->ch_id - midc->dma->chan_base;
        }
    }
    /* calculate CTL_HI */
    ctl_hi.ctlx.reser = 0;
    ctl_hi.ctlx.done = 0;
    width = mids->dma_slave.src_addr_width;
    ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
    pr_debug("MDMA:calc len %d for block size %d\n",
                ctl_hi.ctlx.block_ts, midc->dma->block_size);

    /* calculate CTL_LO */
    ctl_lo.ctl_lo = 0;
    ctl_lo.ctlx.int_en = 1;
    ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
    ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
    ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
    ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
    ...
}

intel_mid_dma_prep_memcpy - Perform a DMA memory copy
chan: channel for DMA transfer
dest: destination address, src: source address
len: DMA transfer length, flags: DMA flags
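The division by two in the tr_width fields works because the dmaengine bus-width enums are the byte counts 1, 2 and 4, and integer division maps them onto the controller's 0/1/2 encoding for 8-, 16- and 32-bit transfers. A quick stand-alone check, with the enum values stubbed to what linux/dmaengine.h defines:

#include <stdio.h>

#define DMA_SLAVE_BUSWIDTH_1_BYTE  1
#define DMA_SLAVE_BUSWIDTH_2_BYTES 2
#define DMA_SLAVE_BUSWIDTH_4_BYTES 4

int main(void)
{
    /* addr_width / 2 -> CTL_LO tr_width field encoding */
    printf("1 byte  -> %d\n", DMA_SLAVE_BUSWIDTH_1_BYTE / 2);     /* 0 */
    printf("2 bytes -> %d\n", DMA_SLAVE_BUSWIDTH_2_BYTES / 2);    /* 1 */
    printf("4 bytes -> %d\n", DMA_SLAVE_BUSWIDTH_4_BYTES / 2);    /* 2 */
    return 0;
}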
static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
{
    if (true == midc->busy) {
        /* trying to free ch in use! */
        pr_err("ERR_MDMA: trying to free ch in use\n");
    }
    spin_lock_bh(&midc->lock);
    midc->descs_allocated = 0;
    list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
        list_del(&desc->desc_node);
        pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
    }
    ...
    midc->in_use = false;
    midc->busy = false;
    /* Disable CH interrupts */
    iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
    iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
    pm_runtime_put(&mid->pdev->dev);
}

intel_mid_dma_free_chan_resources - Frees the allocated resources on this DMA channel
chan: channel requiring attention
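The teardown loop must use list_for_each_entry_safe() because it deletes the node it is standing on; the extra cursor caches the next entry before the body runs, so list_del() plus the pool free cannot derail the iteration. The same pattern in isolation, with a hypothetical element type:

#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical element type for illustration. */
struct demo_item {
    int id;
    struct list_head node;
};

/* Drain a list, freeing each element as we go. */
static void drain_list(struct list_head *head)
{
    struct demo_item *item, *tmp;

    /*
     * _safe variant: 'tmp' holds the next entry before the body
     * runs, so list_del() + kfree() on 'item' is legal here.
     */
    list_for_each_entry_safe(item, tmp, head, node) {
        list_del(&item->node);
        kfree(item);
    }
}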
static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
{
    tfr_status = ioread32(mid->dma_base + RAW_TFR);
    err_status = ioread32(mid->dma_base + RAW_ERR);
    if (!tfr_status && !err_status)
        return IRQ_NONE;

    /* DMA Interrupt */
    pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
    pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
    tfr_status &= mid->intr_mask;
    if (tfr_status) {
        /* need to disable intr */
        iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
        iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
        pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
        call_tasklet = 1;
    }
    err_status &= mid->intr_mask;
    if (err_status) {
        iowrite32((err_status << INT_MASK_WE),
            mid->dma_base + MASK_ERR);
        call_tasklet = 1;
    }
    if (call_tasklet)
        tasklet_schedule(&mid->tasklet);

    return IRQ_HANDLED;
}

intel_mid_dma_interrupt - See if this is our interrupt; if so, schedule the tasklet, otherwise ignore
irq: IRQ where the interrupt occurred
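Returning IRQ_NONE when neither raw status register has bits set is what makes this handler safe on a shared PCI interrupt line: the kernel then passes the interrupt to the next registered handler. The probe-side wiring is not shown in these slides; a hedged sketch of how such a handler and its tasklet bottom half are typically registered (the tasklet function name is illustrative):

#include <linux/interrupt.h>

/* Illustrative wiring; 'mid' and its fields mirror the driver's names. */
static int wire_up_irq(struct middma_device *mid, int irq)
{
    /* Bottom half: runs later, outside hard-IRQ context. */
    tasklet_init(&mid->tasklet, middma_tasklet_fn, (unsigned long)mid);

    /* IRQF_SHARED: the handler must return IRQ_NONE if it isn't ours. */
    return request_irq(irq, intel_mid_dma_interrupt, IRQF_SHARED,
                "intel_mid_dma", mid);
}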
static void middma_shutdown(struct pci_dev *pdev)
{
    struct middma_device *device = pci_get_drvdata(pdev);

    dma_async_device_unregister(&device->common);
    pci_pool_destroy(device->dma_pool);
    if (device->mask_reg)
        iounmap(device->mask_reg);
    if (device->dma_base)
        iounmap(device->dma_base);
    return;
}

middma_shutdown - Shutdown the DMA controller
pdev: Controller PCI device structure
Unregister the DMA controller, clear all structures, and free the interrupt