Page MenuHomeFreeBSD

D20181.1777293940.diff
No OneTemporary

Size
11 KB
Referenced Files
None
Subscribers
None

D20181.1777293940.diff

Index: head/sys/arm64/arm64/busdma_bounce.c
===================================================================
--- head/sys/arm64/arm64/busdma_bounce.c
+++ head/sys/arm64/arm64/busdma_bounce.c
@@ -152,6 +152,8 @@
vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
+static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf,
+ bus_size_t buflen, int *pagesneeded);
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
pmap_t pmap, void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
@@ -271,6 +273,15 @@
return (error);
}
+static bool
+bounce_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
+{
+
+ if ((dmat->bounce_flags & BF_COULD_BOUNCE) == 0)
+ return (true);
+ return (!_bus_dmamap_pagesneeded(dmat, buf, buflen, NULL));
+}
+
static bus_dmamap_t
alloc_dmamap(bus_dma_tag_t dmat, int flags)
{
@@ -539,29 +550,45 @@
dmat->bounce_flags);
}
+static bool
+_bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen,
+ int *pagesneeded)
+{
+ bus_addr_t curaddr;
+ bus_size_t sgsize;
+ int count;
+
+ /*
+ * Count the number of bounce pages needed in order to
+ * complete this transfer
+ */
+ count = 0;
+ curaddr = buf;
+ while (buflen != 0) {
+ sgsize = MIN(buflen, dmat->common.maxsegsz);
+ if (bus_dma_run_filter(&dmat->common, curaddr)) {
+ sgsize = MIN(sgsize,
+ PAGE_SIZE - (curaddr & PAGE_MASK));
+ if (pagesneeded == NULL)
+ return (true);
+ count++;
+ }
+ curaddr += sgsize;
+ buflen -= sgsize;
+ }
+
+ if (pagesneeded != NULL)
+ *pagesneeded = count;
+ return (count != 0);
+}
+
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
bus_size_t buflen, int flags)
{
- bus_addr_t curaddr;
- bus_size_t sgsize;
if ((map->flags & DMAMAP_COULD_BOUNCE) != 0 && map->pagesneeded == 0) {
- /*
- * Count the number of bounce pages
- * needed in order to complete this transfer
- */
- curaddr = buf;
- while (buflen != 0) {
- sgsize = MIN(buflen, dmat->common.maxsegsz);
- if (bus_dma_run_filter(&dmat->common, curaddr)) {
- sgsize = MIN(sgsize,
- PAGE_SIZE - (curaddr & PAGE_MASK));
- map->pagesneeded++;
- }
- curaddr += sgsize;
- buflen -= sgsize;
- }
+ _bus_dmamap_pagesneeded(dmat, buf, buflen, &map->pagesneeded);
CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
}
}
@@ -1316,6 +1343,7 @@
struct bus_dma_impl bus_dma_bounce_impl = {
.tag_create = bounce_bus_dma_tag_create,
.tag_destroy = bounce_bus_dma_tag_destroy,
+ .id_mapped = bounce_bus_dma_id_mapped,
.map_create = bounce_bus_dmamap_create,
.map_destroy = bounce_bus_dmamap_destroy,
.mem_alloc = bounce_bus_dmamem_alloc,
Index: head/sys/arm64/include/bus_dma.h
===================================================================
--- head/sys/arm64/include/bus_dma.h
+++ head/sys/arm64/include/bus_dma.h
@@ -9,6 +9,18 @@
#include <machine/bus_dma_impl.h>
/*
 * Is the DMA address a 1:1 mapping of the physical address?
+ */
+static inline bool
+bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->id_mapped(dmat, buf, buflen));
+}
+
+/*
* Allocate a handle for mapping from kva/uva/physical
* address space into bus device space.
*/
Index: head/sys/arm64/include/bus_dma_impl.h
===================================================================
--- head/sys/arm64/include/bus_dma_impl.h
+++ head/sys/arm64/include/bus_dma_impl.h
@@ -58,6 +58,7 @@
bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
void *lockfuncarg, bus_dma_tag_t *dmat);
int (*tag_destroy)(bus_dma_tag_t dmat);
+ bool (*id_mapped)(bus_dma_tag_t, vm_paddr_t, bus_size_t);
int (*map_create)(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp);
int (*map_destroy)(bus_dma_tag_t dmat, bus_dmamap_t map);
int (*mem_alloc)(bus_dma_tag_t dmat, void** vaddr, int flags,
Index: head/sys/compat/linuxkpi/common/src/linux_pci.c
===================================================================
--- head/sys/compat/linuxkpi/common/src/linux_pci.c
+++ head/sys/compat/linuxkpi/common/src/linux_pci.c
@@ -520,6 +520,7 @@
return (mem);
}
+#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
@@ -530,6 +531,15 @@
priv = dev->dma_priv;
+ /*
+ * If the resultant mapping will be entirely 1:1 with the
+ * physical address, short-circuit the remainder of the
+ * bus_dma API. This avoids tracking collisions in the pctrie
+ * with the additional benefit of reducing overhead.
+ */
+ if (bus_dma_id_mapped(priv->dmat, phys, len))
+ return (phys);
+
obj = uma_zalloc(linux_dma_obj_zone, 0);
DMA_PRIV_LOCK(priv);
@@ -562,7 +572,15 @@
DMA_PRIV_UNLOCK(priv);
return (obj->dma_addr);
}
+#else
+dma_addr_t
+linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
+{
+ return (phys);
+}
+#endif
+#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
@@ -571,6 +589,9 @@
priv = dev->dma_priv;
+ if (pctrie_is_empty(&priv->ptree))
+ return;
+
DMA_PRIV_LOCK(priv);
obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
if (obj == NULL) {
@@ -584,6 +605,12 @@
uma_zfree(linux_dma_obj_zone, obj);
}
+#else
+void
+linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
+{
+}
+#endif
int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
Index: head/sys/sys/bus_dma.h
===================================================================
--- head/sys/sys/bus_dma.h
+++ head/sys/sys/bus_dma.h
@@ -67,7 +67,9 @@
#ifndef _BUS_DMA_H_
#define _BUS_DMA_H_
+#ifdef _KERNEL
#include <sys/_bus_dma.h>
+#endif
/*
* Machine independent interface for mapping physical addresses to peripheral
@@ -133,6 +135,7 @@
bus_size_t ds_len; /* length of transfer */
} bus_dma_segment_t;
+#ifdef _KERNEL
/*
* A function that returns 1 if the address cannot be accessed by
* a device and 0 if it can be.
@@ -302,5 +305,6 @@
BUS_DMAMAP_OP void bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t dmamap);
#undef BUS_DMAMAP_OP
+#endif /* _KERNEL */
#endif /* _BUS_DMA_H_ */
Index: head/sys/x86/include/bus_dma.h
===================================================================
--- head/sys/x86/include/bus_dma.h
+++ head/sys/x86/include/bus_dma.h
@@ -36,6 +36,18 @@
#include <x86/busdma_impl.h>
/*
 * Is the DMA address a 1:1 mapping of the physical address?
+ */
+static inline bool
+bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
+{
+ struct bus_dma_tag_common *tc;
+
+ tc = (struct bus_dma_tag_common *)dmat;
+ return (tc->impl->id_mapped(dmat, buf, buflen));
+}
+
+/*
* Allocate a handle for mapping from kva/uva/physical
* address space into bus device space.
*/
Index: head/sys/x86/include/busdma_impl.h
===================================================================
--- head/sys/x86/include/busdma_impl.h
+++ head/sys/x86/include/busdma_impl.h
@@ -62,6 +62,7 @@
void *lockfuncarg, bus_dma_tag_t *dmat);
int (*tag_destroy)(bus_dma_tag_t dmat);
int (*tag_set_domain)(bus_dma_tag_t);
+ bool (*id_mapped)(bus_dma_tag_t, vm_paddr_t, bus_size_t);
int (*map_create)(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp);
int (*map_destroy)(bus_dma_tag_t dmat, bus_dmamap_t map);
int (*mem_alloc)(bus_dma_tag_t dmat, void** vaddr, int flags,
Index: head/sys/x86/iommu/busdma_dmar.c
===================================================================
--- head/sys/x86/iommu/busdma_dmar.c
+++ head/sys/x86/iommu/busdma_dmar.c
@@ -365,6 +365,13 @@
return (error);
}
+static bool
+dmar_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
+{
+
+ return (false);
+}
+
static int
dmar_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
@@ -857,6 +864,7 @@
.tag_create = dmar_bus_dma_tag_create,
.tag_destroy = dmar_bus_dma_tag_destroy,
.tag_set_domain = dmar_bus_dma_tag_set_domain,
+ .id_mapped = dmar_bus_dma_id_mapped,
.map_create = dmar_bus_dmamap_create,
.map_destroy = dmar_bus_dmamap_destroy,
.mem_alloc = dmar_bus_dmamem_alloc,
Index: head/sys/x86/x86/busdma_bounce.c
===================================================================
--- head/sys/x86/x86/busdma_bounce.c
+++ head/sys/x86/x86/busdma_bounce.c
@@ -141,6 +141,8 @@
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_offset_t vaddr, vm_paddr_t addr1, vm_paddr_t addr2, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
+static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf,
+ bus_size_t buflen, int *pagesneeded);
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
pmap_t pmap, void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
@@ -223,6 +225,15 @@
return (error);
}
+static bool
+bounce_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
+{
+
+ if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) == 0)
+ return (true);
+ return (!_bus_dmamap_pagesneeded(dmat, buf, buflen, NULL));
+}
+
/*
* Update the domain for the tag. We may need to reallocate the zone and
* bounce pages.
@@ -501,29 +512,45 @@
dmat->bounce_flags);
}
+static bool
+_bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen,
+ int *pagesneeded)
+{
+ vm_paddr_t curaddr;
+ bus_size_t sgsize;
+ int count;
+
+ /*
+ * Count the number of bounce pages needed in order to
+ * complete this transfer
+ */
+ count = 0;
+ curaddr = buf;
+ while (buflen != 0) {
+ sgsize = MIN(buflen, dmat->common.maxsegsz);
+ if (bus_dma_run_filter(&dmat->common, curaddr)) {
+ sgsize = MIN(sgsize,
+ PAGE_SIZE - (curaddr & PAGE_MASK));
+ if (pagesneeded == NULL)
+ return (true);
+ count++;
+ }
+ curaddr += sgsize;
+ buflen -= sgsize;
+ }
+
+ if (pagesneeded != NULL)
+ *pagesneeded = count;
+ return (count != 0);
+}
+
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
bus_size_t buflen, int flags)
{
- vm_paddr_t curaddr;
- bus_size_t sgsize;
if (map != &nobounce_dmamap && map->pagesneeded == 0) {
- /*
- * Count the number of bounce pages
- * needed in order to complete this transfer
- */
- curaddr = buf;
- while (buflen != 0) {
- sgsize = MIN(buflen, dmat->common.maxsegsz);
- if (bus_dma_run_filter(&dmat->common, curaddr)) {
- sgsize = MIN(sgsize,
- PAGE_SIZE - (curaddr & PAGE_MASK));
- map->pagesneeded++;
- }
- curaddr += sgsize;
- buflen -= sgsize;
- }
+ _bus_dmamap_pagesneeded(dmat, buf, buflen, &map->pagesneeded);
CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
}
}
@@ -1305,6 +1332,7 @@
.tag_create = bounce_bus_dma_tag_create,
.tag_destroy = bounce_bus_dma_tag_destroy,
.tag_set_domain = bounce_bus_dma_tag_set_domain,
+ .id_mapped = bounce_bus_dma_id_mapped,
.map_create = bounce_bus_dmamap_create,
.map_destroy = bounce_bus_dmamap_destroy,
.mem_alloc = bounce_bus_dmamem_alloc,
Index: head/usr.sbin/camdd/camdd.c
===================================================================
--- head/usr.sbin/camdd/camdd.c
+++ head/usr.sbin/camdd/camdd.c
@@ -51,7 +51,6 @@
#include <sys/time.h>
#include <sys/uio.h>
#include <vm/vm.h>
-#include <machine/bus.h>
#include <sys/bus.h>
#include <sys/bus_dma.h>
#include <sys/mtio.h>

File Metadata

Mime Type
text/plain
Expires
Mon, Apr 27, 12:45 PM (18 h, 24 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
28439895
Default Alt Text
D20181.1777293940.diff (11 KB)

Event Timeline