svn commit: r225135 - projects/armv6/sys/arm/arm
Grzegorz Bernacki
gber at FreeBSD.org
Wed Aug 24 07:52:55 UTC 2011
Author: gber
Date: Wed Aug 24 07:52:55 2011
New Revision: 225135
URL: http://svn.freebsd.org/changeset/base/225135
Log:
Created new implementation of busdma for armv6.
Submitted by: Mark Tinguely
Added:
projects/armv6/sys/arm/arm/busdma_machdep-v6.c
Added: projects/armv6/sys/arm/arm/busdma_machdep-v6.c
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ projects/armv6/sys/arm/arm/busdma_machdep-v6.c Wed Aug 24 07:52:55 2011 (r225135)
@@ -0,0 +1,1502 @@
+/*-
+ * Copyright (c) 2010 Mark Tinguely
+ * Copyright (c) 2004 Olivier Houchard
+ * Copyright (c) 2002 Peter Grehan
+ * Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/arm/arm/v7_busdma_machdep.c 191438 2010-02-01 12:00:00Z jhb $");
+
+#define _ARM32_BUS_DMA_PRIVATE
+#include <sys/param.h>
+#include <sys/kdb.h>
+#include <ddb/ddb.h>
+#include <ddb/db_output.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/bus.h>
+#include <sys/interrupt.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/proc.h>
+#include <sys/mutex.h>
+#include <sys/mbuf.h>
+#include <sys/uio.h>
+#include <sys/sysctl.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+
+#include <machine/atomic.h>
+#include <machine/bus.h>
+#include <machine/cpufunc.h>
+#include <machine/md_var.h>
+
+#define MAX_BPAGES 64
+#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3
+#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
+
+#define FIX_DMAP_BUS_DMASYNC_POSTREAD
+
+struct bounce_zone;
+
+struct bus_dma_tag {
+ bus_dma_tag_t parent;
+ bus_size_t alignment;
+ bus_size_t boundary;
+ bus_addr_t lowaddr;
+ bus_addr_t highaddr;
+ bus_dma_filter_t *filter;
+ void *filterarg;
+ bus_size_t maxsize;
+ u_int nsegments;
+ bus_size_t maxsegsz;
+ int flags;
+ int ref_count;
+ int map_count;
+ bus_dma_lock_t *lockfunc;
+ void *lockfuncarg;
+ bus_dma_segment_t *segments;
+ struct bounce_zone *bounce_zone;
+ /*
+ * DMA range for this tag. If the page doesn't fall within
+ * one of these ranges, an error is returned. The caller
+ * may then decide what to do with the transfer. If the
+ * range pointer is NULL, it is ignored.
+ */
+ struct arm32_dma_range *ranges;
+ int _nranges;
+
+};
+
+struct bounce_page {
+ vm_offset_t vaddr; /* kva of bounce buffer */
+ bus_addr_t busaddr; /* Physical address */
+ vm_offset_t datavaddr; /* kva of client data */
+ bus_size_t datacount; /* client data count */
+ STAILQ_ENTRY(bounce_page) links;
+};
+
+struct sync_list {
+ vm_offset_t vaddr; /* kva of client data */
+ bus_size_t datacount; /* client data count */
+ STAILQ_ENTRY(sync_list) slinks;
+};
+
+int busdma_swi_pending;
+
+struct bounce_zone {
+ STAILQ_ENTRY(bounce_zone) links;
+ STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
+ int total_bpages;
+ int free_bpages;
+ int reserved_bpages;
+ int active_bpages;
+ int total_bounced;
+ int total_deferred;
+ int map_count;
+ bus_size_t alignment;
+ bus_addr_t lowaddr;
+ char zoneid[8];
+ char lowaddrid[20];
+ struct sysctl_ctx_list sysctl_tree;
+ struct sysctl_oid *sysctl_tree_top;
+};
+
+static struct mtx bounce_lock;
+static int total_bpages;
+static int busdma_zonecount;
+static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
+
+SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
+SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
+ "Total bounce pages");
+
+struct bus_dmamap {
+ struct bp_list bpages;
+ int pagesneeded;
+ int pagesreserved;
+ bus_dma_tag_t dmat;
+ void *buf; /* unmapped buffer pointer */
+ bus_size_t buflen; /* unmapped buffer length */
+ pmap_t pmap;
+ bus_dmamap_callback_t *callback;
+ void *callback_arg;
+ STAILQ_ENTRY(bus_dmamap) links;
+ STAILQ_HEAD(,sync_list) slist;
+};
+
+static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
+static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
+
+static void init_bounce_pages(void *dummy);
+static int alloc_bounce_zone(bus_dma_tag_t dmat);
+static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
+static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+ int commit);
+static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
+ vm_offset_t vaddr, bus_size_t size);
+static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
+int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
+static int _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+ void *buf, bus_size_t buflen, int flags);
+
+static __inline int
+_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
+{
+ int i;
+ for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
+ if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
+ || (lowaddr < phys_avail[i] &&
+ highaddr > phys_avail[i]))
+ return (1);
+ }
+ return (0);
+}
+
+static __inline struct arm32_dma_range *
+_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
+ bus_addr_t curaddr)
+{
+ struct arm32_dma_range *dr;
+ int i;
+
+ for (i = 0, dr = ranges; i < nranges; i++, dr++) {
+ if (curaddr >= dr->dr_sysbase &&
+ round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
+ return (dr);
+ }
+
+ return (NULL);
+}
+
+/*
+ * Return true if a match is made.
+ *
+ * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
+ *
+ * If paddr is within the bounds of the dma tag, call the filter callback
+ * to check for a match; if there is no filter callback, assume a match.
+ */
+int
+run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
+{
+ int retval;
+
+ retval = 0;
+
+ do {
+ if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
+ || ((paddr & (dmat->alignment - 1)) != 0))
+ && (dmat->filter == NULL
+ || (*dmat->filter)(dmat->filterarg, paddr) != 0))
+ retval = 1;
+
+ dmat = dmat->parent;
+ } while (retval == 0 && dmat != NULL);
+ return (retval);
+}
+
+/*
+ * Convenience function for manipulating driver locks from busdma (during
+ * busdma_swi, for example). Drivers that don't provide their own locks
+ * should specify &Giant to dmat->lockfuncarg. Drivers that use their own
+ * non-mutex locking scheme don't have to use this at all.
+ */
+void
+busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
+{
+ struct mtx *dmtx;
+
+ dmtx = (struct mtx *)arg;
+ switch (op) {
+ case BUS_DMA_LOCK:
+ mtx_lock(dmtx);
+ break;
+ case BUS_DMA_UNLOCK:
+ mtx_unlock(dmtx);
+ break;
+ default:
+ panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
+ }
+}
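+
+/*
+ * For example, a driver whose deferred busdma callbacks must run under
+ * its own mutex passes this function and that mutex when creating its
+ * tag.  A minimal sketch ("sc" and its fields are hypothetical):
+ *
+ *     error = bus_dma_tag_create(..., busdma_lock_mutex, &sc->sc_mtx,
+ *         &sc->sc_dmat);
+ *
+ * Drivers still running under Giant can simply pass &Giant instead.
+ */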
+
+/*
+ * dflt_lock should never get called. It gets put into the dma tag when
+ * lockfunc == NULL, which is only valid if the maps that are associated
+ * with the tag are meant to never be deferred.
+ * XXX Should have a way to identify which driver is responsible here.
+ */
+static void
+dflt_lock(void *arg, bus_dma_lock_op_t op)
+{
+ panic("driver error: busdma dflt_lock called");
+}
+
+/*
+ * Allocate a device specific dma_tag.
+ */
+int
+bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+ bus_size_t boundary, bus_addr_t lowaddr,
+ bus_addr_t highaddr, bus_dma_filter_t *filter,
+ void *filterarg, bus_size_t maxsize, int nsegments,
+ bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, bus_dma_tag_t *dmat)
+{
+ bus_dma_tag_t newtag;
+ int error = 0;
+
+#if 0
+ if (!parent)
+ parent = arm_root_dma_tag;
+#endif
+
+ /* Basic sanity checking */
+ if (boundary != 0 && boundary < maxsegsz)
+ maxsegsz = boundary;
+
+ /* Return a NULL tag on failure */
+ *dmat = NULL;
+
+ if (maxsegsz == 0) {
+ return (EINVAL);
+ }
+
+ newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
+ M_ZERO | M_NOWAIT);
+ if (newtag == NULL) {
+ CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
+ __func__, newtag, 0, error);
+ return (ENOMEM);
+ }
+
+ newtag->parent = parent;
+ newtag->alignment = alignment;
+ newtag->boundary = boundary;
+ newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
+ newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
+ (PAGE_SIZE - 1);
+ newtag->filter = filter;
+ newtag->filterarg = filterarg;
+ newtag->maxsize = maxsize;
+ newtag->nsegments = nsegments;
+ newtag->maxsegsz = maxsegsz;
+ newtag->flags = flags;
+ newtag->ref_count = 1; /* Count ourself */
+ newtag->map_count = 0;
+ newtag->ranges = bus_dma_get_range();
+ newtag->_nranges = bus_dma_get_range_nb();
+ if (lockfunc != NULL) {
+ newtag->lockfunc = lockfunc;
+ newtag->lockfuncarg = lockfuncarg;
+ } else {
+ newtag->lockfunc = dflt_lock;
+ newtag->lockfuncarg = NULL;
+ }
+ newtag->segments = NULL;
+
+ /* Take into account any restrictions imposed by our parent tag */
+ if (parent != NULL) {
+ newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
+ newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
+ if (newtag->boundary == 0)
+ newtag->boundary = parent->boundary;
+ else if (parent->boundary != 0)
+ newtag->boundary = MIN(parent->boundary,
+ newtag->boundary);
+ if ((newtag->filter != NULL) ||
+ ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
+ newtag->flags |= BUS_DMA_COULD_BOUNCE;
+ if (newtag->filter == NULL) {
+ /*
+ * Short circuit looking at our parent directly
+ * since we have encapsulated all of its information
+ */
+ newtag->filter = parent->filter;
+ newtag->filterarg = parent->filterarg;
+ newtag->parent = parent->parent;
+ }
+ if (newtag->parent != NULL)
+ atomic_add_int(&parent->ref_count, 1);
+ }
+
+ if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
+ || newtag->alignment > 1)
+ newtag->flags |= BUS_DMA_COULD_BOUNCE;
+
+ if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+ (flags & BUS_DMA_ALLOCNOW) != 0) {
+ struct bounce_zone *bz;
+
+ /* Must bounce */
+
+ if ((error = alloc_bounce_zone(newtag)) != 0) {
+ free(newtag, M_DEVBUF);
+ return (error);
+ }
+ bz = newtag->bounce_zone;
+
+ if (ptoa(bz->total_bpages) < maxsize) {
+ int pages;
+
+ pages = atop(maxsize) - bz->total_bpages;
+
+ /* Add pages to our bounce pool */
+ if (alloc_bounce_pages(newtag, pages) < pages)
+ error = ENOMEM;
+ }
+ /* Performed initial allocation */
+ newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
+ } else
+ newtag->bounce_zone = NULL;
+
+ if (error != 0) {
+ free(newtag, M_DEVBUF);
+ } else {
+ *dmat = newtag;
+ }
+ CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
+ __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
+ return (error);
+}
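+
+/*
+ * As an illustration only (the device limits, "dev" and "sc" are
+ * hypothetical), a driver whose hardware can address the low 4GB and
+ * transfer up to 64KB in at most 16 segments might create its tag as:
+ *
+ *     error = bus_dma_tag_create(bus_get_dma_tag(dev),
+ *         4, 0,                        4-byte alignment, no boundary
+ *         BUS_SPACE_MAXADDR_32BIT,     lowaddr
+ *         BUS_SPACE_MAXADDR,           highaddr
+ *         NULL, NULL,                  filter, filterarg
+ *         65536, 16, 65536,            maxsize, nsegments, maxsegsz
+ *         0, busdma_lock_mutex, &sc->mtx, &sc->dmat);
+ */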
+
+int
+bus_dma_tag_destroy(bus_dma_tag_t dmat)
+{
+ bus_dma_tag_t dmat_copy;
+ int error;
+
+ error = 0;
+ dmat_copy = dmat;
+
+ if (dmat != NULL) {
+
+ if (dmat->map_count != 0) {
+ error = EBUSY;
+ goto out;
+ }
+
+ while (dmat != NULL) {
+ bus_dma_tag_t parent;
+
+ parent = dmat->parent;
+ atomic_subtract_int(&dmat->ref_count, 1);
+ if (dmat->ref_count == 0) {
+ if (dmat->segments != NULL)
+ free(dmat->segments, M_DEVBUF);
+ free(dmat, M_DEVBUF);
+ /*
+ * Last reference count, so
+ * release our reference
+ * count on our parent.
+ */
+ dmat = parent;
+ } else
+ dmat = NULL;
+ }
+ }
+out:
+ CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
+ return (error);
+}
+
+/*
+ * Allocate a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int
+bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+{
+ int error;
+
+ error = 0;
+
+ *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
+ M_NOWAIT | M_ZERO);
+ if (*mapp == NULL) {
+ CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
+ return (ENOMEM);
+ }
+ STAILQ_INIT(&((*mapp)->slist));
+
+ if (dmat->segments == NULL) {
+ dmat->segments = (bus_dma_segment_t *)malloc(
+ sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
+ M_NOWAIT);
+ if (dmat->segments == NULL) {
+ CTR3(KTR_BUSDMA, "%s: tag %p error %d",
+ __func__, dmat, ENOMEM);
+ free(*mapp, M_DEVBUF);
+ *mapp = NULL;
+ return (ENOMEM);
+ }
+ }
+ /*
+ * Bouncing might be required if the driver asks for an active
+ * exclusion region, a data alignment that is stricter than 1, and/or
+ * an active address boundary.
+ */
+ if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
+
+ /* Must bounce */
+ struct bounce_zone *bz;
+ int maxpages;
+
+ if (dmat->bounce_zone == NULL) {
+ if ((error = alloc_bounce_zone(dmat)) != 0) {
+ free(*mapp, M_DEVBUF);
+ *mapp = NULL;
+ return (error);
+ }
+ }
+ bz = dmat->bounce_zone;
+
+ /* Initialize the new map */
+ STAILQ_INIT(&((*mapp)->bpages));
+
+ /*
+ * Attempt to add pages to our pool on a per-instance
+ * basis up to a sane limit.
+ */
+ maxpages = MAX_BPAGES;
+ if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
+ || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
+ int pages;
+
+ pages = MAX(atop(dmat->maxsize), 1);
+ pages = MIN(maxpages - bz->total_bpages, pages);
+ pages = MAX(pages, 1);
+ if (alloc_bounce_pages(dmat, pages) < pages)
+ error = ENOMEM;
+
+ if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
+ if (error == 0)
+ dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
+ } else {
+ error = 0;
+ }
+ }
+ bz->map_count++;
+ }
+ if (error == 0)
+ dmat->map_count++;
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+ __func__, dmat, dmat->flags, error);
+ return (error);
+}
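+
+/*
+ * Drivers typically create one map per concurrently active transfer,
+ * e.g. one per receive ring slot.  A sketch (the ring layout and names
+ * are hypothetical):
+ *
+ *     for (i = 0; i < MYDEV_RX_SLOTS; i++) {
+ *         error = bus_dmamap_create(sc->rx_dmat, 0,
+ *             &sc->rx_desc[i].map);
+ *         if (error != 0)
+ *             break;
+ *     }
+ */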
+
+/*
+ * Destroy a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int
+bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ if (STAILQ_FIRST(&map->bpages) != NULL ||
+ STAILQ_FIRST(&map->slist) != NULL) {
+ CTR3(KTR_BUSDMA, "%s: tag %p error %d",
+ __func__, dmat, EBUSY);
+ return (EBUSY);
+ }
+ if (dmat->bounce_zone)
+ dmat->bounce_zone->map_count--;
+ free(map, M_DEVBUF);
+ dmat->map_count--;
+ CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
+ return (0);
+}
+
+
+/*
+ * Allocate a piece of memory that can be efficiently mapped into
+ * bus device space based on the constraints listed in the dma tag.
+ * A dmamap for use with dmamap_load is also allocated.
+ */
+int
+bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+ bus_dmamap_t *mapp)
+{
+ int mflags, len;
+
+ if (flags & BUS_DMA_NOWAIT)
+ mflags = M_NOWAIT;
+ else
+ mflags = M_WAITOK;
+
+ /* ARM non-snooping caches need a map for the VA cache sync structure */
+
+ *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
+ M_NOWAIT | M_ZERO);
+ if (*mapp == NULL) {
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+ __func__, dmat, dmat->flags, ENOMEM);
+ return (ENOMEM);
+ }
+
+ STAILQ_INIT(&((*mapp)->slist));
+
+ if (dmat->segments == NULL) {
+ dmat->segments = (bus_dma_segment_t *)malloc(
+ sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
+ mflags);
+ if (dmat->segments == NULL) {
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+ __func__, dmat, dmat->flags, ENOMEM);
+ free(*mapp, M_DEVBUF);
+ *mapp = NULL;
+ return (ENOMEM);
+ }
+ }
+
+ if (flags & BUS_DMA_ZERO)
+ mflags |= M_ZERO;
+
+ /*
+ * XXX:
+ * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
+ * alignment guarantees of malloc need to be nailed down, and the
+ * code below should be rewritten to take that into account.
+ *
+ * In the meantime, we'll warn the user if malloc gets it wrong.
+ *
+ * Allocate at least a cache line. This should help avoid cache
+ * corruption.
+ */
+ len = max(dmat->maxsize, arm_dcache_align);
+ if (len <= PAGE_SIZE &&
+ (dmat->alignment < len) &&
+ !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
+ *vaddr = malloc(len, M_DEVBUF, mflags);
+ } else {
+ /*
+ * XXX Use Contigmalloc until it is merged into this facility
+ * and handles multi-seg allocations. Nobody is doing
+ * multi-seg allocations yet though.
+ * XXX Certain AGP hardware does.
+ */
+ *vaddr = contigmalloc(len, M_DEVBUF, mflags,
+ 0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
+ dmat->boundary);
+ }
+ if (*vaddr == NULL) {
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+ __func__, dmat, dmat->flags, ENOMEM);
+ free(*mapp, M_DEVBUF);
+ *mapp = NULL;
+ return (ENOMEM);
+ } else if ((uintptr_t)*vaddr & (dmat->alignment - 1)) {
+ printf("bus_dmamem_alloc failed to align memory properly.\n");
+ }
+ dmat->map_count++;
+
+ if (flags & BUS_DMA_COHERENT)
+ pmap_change_attr((vm_offset_t)*vaddr, len,
+ BUS_DMA_NOCACHE);
+
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+ __func__, dmat, dmat->flags, 0);
+ return (0);
+}
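+
+/*
+ * For a long-lived area such as a descriptor ring, a driver would
+ * typically request zeroed, coherent memory so that per-transfer cache
+ * syncs on the ring can be avoided.  A sketch (names hypothetical):
+ *
+ *     error = bus_dmamem_alloc(sc->ring_dmat, (void **)&sc->ring,
+ *         BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
+ *         &sc->ring_map);
+ *     if (error != 0)
+ *         return (error);
+ */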
+
+/*
+ * Free a piece of memory and its associated dmamap that was allocated
+ * via bus_dmamem_alloc. Make the same choice for free/contigfree.
+ */
+void
+bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+{
+ int len;
+
+#ifdef mftnotyet
+ pmap_change_attr((vm_offset_t)vaddr, dmat->maxsize, ARM_WRITE_BACK);
+#endif
+ len = max(dmat->maxsize, arm_dcache_align);
+ if (len <= PAGE_SIZE &&
+ (dmat->alignment < len) &&
+ !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
+ free(vaddr, M_DEVBUF);
+ else {
+ contigfree(vaddr, len, M_DEVBUF);
+ }
+ dmat->map_count--;
+ free(map, M_DEVBUF);
+ CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
+}
+
+static int
+_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+ void *buf, bus_size_t buflen, int flags)
+{
+ vm_offset_t vaddr;
+ vm_offset_t vendaddr;
+ bus_addr_t paddr;
+
+ if (map->pagesneeded == 0) {
+ CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d"
+ " map= %p, pagesneeded= %d",
+ dmat->lowaddr, dmat->boundary, dmat->alignment,
+ map, map->pagesneeded);
+ /*
+ * Count the number of bounce pages
+ * needed in order to complete this transfer
+ */
+ vaddr = (vm_offset_t)buf;
+ vendaddr = (vm_offset_t)buf + buflen;
+
+ while (vaddr < vendaddr) {
+ if (__predict_true(map->pmap == pmap_kernel()))
+ paddr = pmap_kextract(vaddr);
+ else
+ paddr = pmap_extract(map->pmap, vaddr);
+ if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+ run_filter(dmat, paddr) != 0) {
+ map->pagesneeded++;
+ }
+ vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
+
+ }
+ CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
+ }
+
+ /* Reserve Necessary Bounce Pages */
+ if (map->pagesneeded != 0) {
+ mtx_lock(&bounce_lock);
+ if (flags & BUS_DMA_NOWAIT) {
+ if (reserve_bounce_pages(dmat, map, 0) != 0) {
+ map->pagesneeded = 0;
+ mtx_unlock(&bounce_lock);
+ return (ENOMEM);
+ }
+ } else {
+ if (reserve_bounce_pages(dmat, map, 1) != 0) {
+ /* Queue us for resources */
+ map->dmat = dmat;
+ map->buf = buf;
+ map->buflen = buflen;
+ STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
+ map, links);
+ mtx_unlock(&bounce_lock);
+ return (EINPROGRESS);
+ }
+ }
+ mtx_unlock(&bounce_lock);
+ }
+
+ return (0);
+}
+
+/*
+ * Utility function to load a linear buffer. lastaddrp holds state
+ * between invocations (for multiple-buffer loads). segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ * first indicates if this is the first invocation of this function.
+ */
+static __inline int
+_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
+ bus_dmamap_t map,
+ void *buf, bus_size_t buflen,
+ int flags,
+ bus_addr_t *lastaddrp,
+ bus_dma_segment_t *segs,
+ int *segp,
+ int first)
+{
+ bus_size_t sgsize;
+ bus_addr_t curaddr, lastaddr, baddr, bmask;
+ vm_offset_t vaddr;
+ struct sync_list *sl;
+ int seg, error;
+
+ if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
+ error = _bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
+ if (error)
+ return (error);
+ }
+
+ sl = NULL;
+ vaddr = (vm_offset_t)buf;
+ lastaddr = *lastaddrp;
+ bmask = ~(dmat->boundary - 1);
+
+ for (seg = *segp; buflen > 0 ; ) {
+ /*
+ * Get the physical address for this segment.
+ */
+ if (__predict_true(map->pmap == pmap_kernel()))
+ curaddr = pmap_kextract(vaddr);
+ else
+ curaddr = pmap_extract(map->pmap, vaddr);
+
+ /*
+ * Compute the segment size, and adjust counts.
+ */
+ sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
+ if (sgsize > dmat->maxsegsz)
+ sgsize = dmat->maxsegsz;
+ if (buflen < sgsize)
+ sgsize = buflen;
+
+ /*
+ * Make sure we don't cross any boundaries.
+ */
+ if (dmat->boundary > 0) {
+ baddr = (curaddr + dmat->boundary) & bmask;
+ if (sgsize > (baddr - curaddr))
+ sgsize = (baddr - curaddr);
+ }
+
+ if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+ map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
+ curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
+ } else {
+ /* add_sync_list(dmat, map, vaddr, sgsize, cflag); */
+ sl = (struct sync_list *)malloc(sizeof(struct sync_list),
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (sl == NULL)
+ goto cleanup;
+ STAILQ_INSERT_TAIL(&(map->slist), sl, slinks);
+ sl->vaddr = vaddr;
+ sl->datacount = sgsize;
+ }
+
+ if (dmat->ranges) {
+ struct arm32_dma_range *dr;
+
+ dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
+ curaddr);
+ if (dr == NULL) {
+ _bus_dmamap_unload(dmat, map);
+ return (EINVAL);
+ }
+ /*
+ * In a valid DMA range. Translate the physical
+ * memory address to an address in the DMA window.
+ */
+ curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
+ }
+
+ /*
+ * Insert chunk into a segment, coalescing with
+ * previous segment if possible.
+ */
+ if (first) {
+ segs[seg].ds_addr = curaddr;
+ segs[seg].ds_len = sgsize;
+ first = 0;
+ } else {
+ if (curaddr == lastaddr &&
+ (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
+ (dmat->boundary == 0 ||
+ (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ segs[seg].ds_len += sgsize;
+ else {
+ if (++seg >= dmat->nsegments)
+ break;
+ segs[seg].ds_addr = curaddr;
+ segs[seg].ds_len = sgsize;
+ }
+ }
+
+ lastaddr = curaddr + sgsize;
+ vaddr += sgsize;
+ buflen -= sgsize;
+ }
+
+ *segp = seg;
+ *lastaddrp = lastaddr;
+cleanup:
+ /*
+ * Did we fit?
+ */
+ if (buflen != 0) {
+ _bus_dmamap_unload(dmat, map);
+ return (EFBIG); /* XXX better return value here? */
+ }
+ return (0);
+}
+
+/*
+ * Map the buffer buf into bus space using the dmamap map.
+ */
+int
+bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, bus_dmamap_callback_t *callback,
+ void *callback_arg, int flags)
+{
+ bus_addr_t lastaddr = 0;
+ int error, nsegs = 0;
+
+ flags |= BUS_DMA_WAITOK;
+ map->callback = callback;
+ map->callback_arg = callback_arg;
+ map->pmap = kernel_pmap;
+
+ error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, flags,
+ &lastaddr, dmat->segments, &nsegs, 1);
+
+ CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
+ __func__, dmat, dmat->flags, error, nsegs + 1);
+
+ if (error == EINPROGRESS) {
+ return (error);
+ }
+
+ if (error)
+ (*callback)(callback_arg, dmat->segments, 0, error);
+ else
+ (*callback)(callback_arg, dmat->segments, nsegs + 1, 0);
+
+ /*
+ * Return ENOMEM to the caller so that it can pass it up the stack.
+ * This error only happens when NOWAIT is set, so deferral is disabled.
+ */
+ if (error == ENOMEM)
+ return (error);
+
+ return (0);
+}
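+
+/*
+ * Because a load may complete asynchronously (EINPROGRESS), callers
+ * record the result from inside the callback rather than after the
+ * call returns.  A sketch, with hypothetical driver names:
+ *
+ *     static void
+ *     mydev_load_cb(void *arg, bus_dma_segment_t *segs, int nseg,
+ *         int error)
+ *     {
+ *         bus_addr_t *paddr = arg;
+ *
+ *         if (error == 0)
+ *             *paddr = segs[0].ds_addr;
+ *     }
+ *
+ *     error = bus_dmamap_load(sc->dmat, sc->map, sc->buf, sc->buflen,
+ *         mydev_load_cb, &sc->paddr, BUS_DMA_NOWAIT);
+ */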
+
+/*
+ * Like _bus_dmamap_load(), but for mbufs.
+ */
+static __inline int
+_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
+ int flags)
+{
+ int error;
+
+ M_ASSERTPKTHDR(m0);
+ map->pmap = kernel_pmap;
+
+ flags |= BUS_DMA_NOWAIT;
+ *nsegs = 0;
+ error = 0;
+ if (m0->m_pkthdr.len <= dmat->maxsize) {
+ int first = 1;
+ bus_addr_t lastaddr = 0;
+ struct mbuf *m;
+
+ for (m = m0; m != NULL && error == 0; m = m->m_next) {
+ if (m->m_len > 0) {
+ error = _bus_dmamap_load_buffer(dmat, map,
+ m->m_data, m->m_len,
+ flags, &lastaddr,
+ segs, nsegs, first);
+ first = 0;
+ }
+ }
+ } else {
+ error = EINVAL;
+ }
+
+ /* XXX FIXME: Having to increment nsegs is really annoying */
+ ++*nsegs;
+ CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
+ __func__, dmat, dmat->flags, error, *nsegs);
+ return (error);
+}
+
+int
+bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct mbuf *m0,
+ bus_dmamap_callback2_t *callback, void *callback_arg,
+ int flags)
+{
+ int nsegs, error;
+
+ error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments, &nsegs,
+ flags);
+
+ if (error) {
+ /* force "no valid mappings" in callback */
+ (*callback)(callback_arg, dmat->segments, 0, 0, error);
+ } else {
+ (*callback)(callback_arg, dmat->segments,
+ nsegs, m0->m_pkthdr.len, error);
+ }
+ CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
+ __func__, dmat, dmat->flags, error, nsegs);
+
+ return (error);
+}
+
+int
+bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
+ int flags)
+{
+ return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags));
+}
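+
+/*
+ * A network driver's transmit path would typically use this variant
+ * and consume the returned segments directly.  A sketch (the segment
+ * limit, descriptor helper and names are hypothetical):
+ *
+ *     bus_dma_segment_t segs[MYDEV_MAXSEGS];
+ *     int i, nsegs;
+ *
+ *     error = bus_dmamap_load_mbuf_sg(sc->tx_dmat, txd->map, m, segs,
+ *         &nsegs, BUS_DMA_NOWAIT);
+ *     if (error == 0)
+ *         for (i = 0; i < nsegs; i++)
+ *             mydev_set_desc(sc, segs[i].ds_addr, segs[i].ds_len);
+ */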
+
+/*
+ * Like _bus_dmamap_load(), but for uios.
+ */
+int
+bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct uio *uio,
+ bus_dmamap_callback2_t *callback, void *callback_arg,
+ int flags)
+{
+ bus_addr_t lastaddr;
+ int nsegs, error, first, i;
+ bus_size_t resid;
+ struct iovec *iov;
+
+ flags |= BUS_DMA_NOWAIT;
+ resid = uio->uio_resid;
+ iov = uio->uio_iov;
+
+ if (uio->uio_segflg == UIO_USERSPACE) {
+ KASSERT(uio->uio_td != NULL,
+ ("bus_dmamap_load_uio: USERSPACE but no proc"));
+ map->pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
+ } else
+ map->pmap = kernel_pmap;
+
+ nsegs = 0;
+ error = 0;
+ first = 1;
+ lastaddr = (bus_addr_t) 0;
+ for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
+ /*
+ * Now at the first iovec to load. Load each iovec
+ * until we have exhausted the residual count.
+ */
+ bus_size_t minlen =
+ resid < iov[i].iov_len ? resid : iov[i].iov_len;
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***