svn commit: r343087 - head/sys/x86/x86
Konstantin Belousov
kib at FreeBSD.org
Wed Jan 16 06:10:56 UTC 2019
Author: kib
Date: Wed Jan 16 06:10:55 2019
New Revision: 343087
URL: https://svnweb.freebsd.org/changeset/base/343087
Log:
Style(9) fixes for x86/busdma_bounce.c.
Remove extra parentheses. Adjust indentation and line filling.
Sponsored by: The FreeBSD Foundation
MFC after: 1 week
Modified:
head/sys/x86/x86/busdma_bounce.c
Modified: head/sys/x86/x86/busdma_bounce.c
==============================================================================
--- head/sys/x86/x86/busdma_bounce.c Wed Jan 16 05:51:03 2019 (r343086)
+++ head/sys/x86/x86/busdma_bounce.c Wed Jan 16 06:10:55 2019 (r343087)
@@ -137,19 +137,16 @@ static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
- int commit);
+ int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
- vm_offset_t vaddr, bus_addr_t addr1,
- bus_addr_t addr2, bus_size_t size);
+ vm_offset_t vaddr, bus_addr_t addr1, bus_addr_t addr2, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
- pmap_t pmap, void *buf, bus_size_t buflen,
- int flags);
+ pmap_t pmap, void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
- vm_paddr_t buf, bus_size_t buflen,
- int flags);
+ vm_paddr_t buf, bus_size_t buflen, int flags);
static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
- int flags);
+ int flags);
static int
bounce_bus_dma_zone_setup(bus_dma_tag_t dmat)
@@ -202,15 +199,15 @@ bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_si
newtag->map_count = 0;
newtag->segments = NULL;
- if (parent != NULL && ((newtag->common.filter != NULL) ||
- ((parent->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0)))
+ if (parent != NULL && (newtag->common.filter != NULL ||
+ (parent->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0))
newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;
if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
newtag->common.alignment > 1)
newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;
- if (((newtag->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+ if ((newtag->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
(flags & BUS_DMA_ALLOCNOW) != 0)
error = bounce_bus_dma_zone_setup(newtag);
else
@@ -309,7 +306,7 @@ bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags
* exclusion region, a data alignment that is stricter than 1, and/or
* an active address boundary.
*/
- if (dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) {
+ if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
/* Must bounce */
if (dmat->bounce_zone == NULL) {
if ((error = alloc_bounce_zone(dmat)) != 0)
@@ -448,14 +445,15 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vad
*
* In the meantime warn the user if malloc gets it wrong.
*/
- if ((dmat->common.maxsize <= PAGE_SIZE) &&
- (dmat->common.alignment <= dmat->common.maxsize) &&
+ if (dmat->common.maxsize <= PAGE_SIZE &&
+ dmat->common.alignment <= dmat->common.maxsize &&
dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
attr == VM_MEMATTR_DEFAULT) {
*vaddr = malloc_domainset(dmat->common.maxsize, M_DEVBUF,
DOMAINSET_PREF(dmat->common.domain), mflags);
} else if (dmat->common.nsegments >=
- howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz, PAGE_SIZE)) &&
+ howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz,
+ PAGE_SIZE)) &&
dmat->common.alignment <= PAGE_SIZE &&
(dmat->common.boundary % PAGE_SIZE) == 0) {
/* Page-based multi-segment allocations allowed */
@@ -512,7 +510,7 @@ _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_
bus_addr_t curaddr;
bus_size_t sgsize;
- if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
+ if (map != &nobounce_dmamap && map->pagesneeded == 0) {
/*
* Count the number of bounce pages
* needed in order to complete this transfer
@@ -541,7 +539,7 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap
bus_addr_t paddr;
bus_size_t sg_len;
- if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
+ if (map != &nobounce_dmamap && map->pagesneeded == 0) {
CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
"alignment= %d", dmat->common.lowaddr,
ptoa((vm_paddr_t)Maxmem),
@@ -580,7 +578,7 @@ _bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t
int page_index;
vm_paddr_t paddr;
- if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
+ if (map != &nobounce_dmamap && map->pagesneeded == 0) {
CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
"alignment= %d", dmat->common.lowaddr,
ptoa((vm_paddr_t)Maxmem),
@@ -718,7 +716,7 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dm
while (buflen > 0) {
curaddr = buf;
sgsize = MIN(buflen, dmat->common.maxsegsz);
- if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+ if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
map->pagesneeded != 0 &&
bus_dma_run_filter(&dmat->common, curaddr)) {
sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
@@ -786,7 +784,7 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_
*/
max_sgsize = MIN(buflen, dmat->common.maxsegsz);
sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
- if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+ if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
map->pagesneeded != 0 &&
bus_dma_run_filter(&dmat->common, curaddr)) {
sgsize = roundup2(sgsize, dmat->common.alignment);
@@ -853,7 +851,7 @@ bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmam
paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
max_sgsize = MIN(buflen, dmat->common.maxsegsz);
sgsize = PAGE_SIZE - ma_offs;
- if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+ if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
map->pagesneeded != 0 &&
bus_dma_run_filter(&dmat->common, paddr)) {
sgsize = roundup2(sgsize, dmat->common.alignment);
@@ -1066,9 +1064,9 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
/* Check to see if we already have a suitable zone */
STAILQ_FOREACH(bz, &bounce_zone_list, links) {
- if ((dmat->common.alignment <= bz->alignment) &&
- (dmat->common.lowaddr >= bz->lowaddr) &&
- (dmat->common.domain == bz->domain)) {
+ if (dmat->common.alignment <= bz->alignment &&
+ dmat->common.lowaddr >= bz->lowaddr &&
+ dmat->common.domain == bz->domain) {
dmat->bounce_zone = bz;
return (0);
}
@@ -1196,7 +1194,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
- bus_addr_t addr1, bus_addr_t addr2, bus_size_t size)
+ bus_addr_t addr1, bus_addr_t addr2, bus_size_t size)
{
struct bounce_zone *bz;
struct bounce_page *bpage;
More information about the svn-src-head
mailing list