PERFORCE change 131266 for review
Steve Wise <swise at FreeBSD.org>
Wed Dec 19 14:53:37 PST 2007
http://perforce.freebsd.org/chv.cgi?CH=131266
Change 131266 by swise at swise:vic10:iwarp on 2007/12/19 22:52:53
Lots of rdma fixes...
- passive-side MPA negotiation working
- idr emulation in linux_compat.h fixed (list handling, duplicate keys, node freeing)
- user doorbell (udb) BAR region mapped and exported via RDMA_GET_PARAMS
- gid cache correctly populated
Affected files ...
.. //depot/projects/iwarp/sys/contrib/rdma/rdma_cache.c#3 edit
.. //depot/projects/iwarp/sys/contrib/rdma/rdma_cma.c#9 edit
.. //depot/projects/iwarp/sys/contrib/rdma/rdma_iwcm.c#6 edit
.. //depot/projects/iwarp/sys/dev/cxgb/cxgb_adapter.h#5 edit
.. //depot/projects/iwarp/sys/dev/cxgb/cxgb_main.c#5 edit
.. //depot/projects/iwarp/sys/dev/cxgb/cxgb_offload.c#6 edit
.. //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_cxio_hal.c#3 edit
.. //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_iwch_cm.c#3 edit
.. //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_iwch_provider.c#3 edit
.. //depot/projects/iwarp/sys/sys/linux_compat.h#8 edit
.. //depot/projects/iwarp/sys/sys/linux_pci.h#2 edit
Differences ...
==== //depot/projects/iwarp/sys/contrib/rdma/rdma_cache.c#3 (text+ko) ====
@@ -129,7 +129,7 @@
for (p = 0; p <= end_port(device) - start_port(device); ++p) {
cache = device->cache.gid_cache[p];
for (i = 0; i < cache->table_len; ++i) {
- if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
+ if (!memcmp(gid, &cache->table[i], 6)) { /* XXX */
*port_num = p + start_port(device);
if (index)
*index = i;
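
The 6-byte compare is a stopgap (hence the XXX): for an iWARP device the
gid table entries are presumably built from the interface MAC address, so
only the first 6 bytes of a gid carry information when matching an
address-derived search key.  A hedged sketch of that assumption
(fill_iwch_gid is a hypothetical helper, not part of this change):

    static void
    fill_iwch_gid(union ib_gid *gid, const uint8_t *hwaddr)
    {
            memset(gid->raw, 0, sizeof(gid->raw));  /* zero all 16 bytes */
            memcpy(gid->raw, hwaddr, 6);            /* MAC in the low 6  */
    }
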
==== //depot/projects/iwarp/sys/contrib/rdma/rdma_cma.c#9 (text+ko) ====
@@ -1307,6 +1307,7 @@
struct rdma_cm_event event;
int ret;
struct ifaddr *ifa;
+ uint16_t port;
listen_id = cm_id->context;
if (cma_disable_remove(listen_id, CMA_LISTEN))
@@ -1326,7 +1327,10 @@
mtx_unlock_spin(&conn_id->lock);
conn_id->state = CMA_CONNECT;
+ port = iw_event->local_addr.sin_port;
+ iw_event->local_addr.sin_port = 0;
ifa = ifa_ifwithaddr((struct sockaddr *)&iw_event->local_addr);
+ iw_event->local_addr.sin_port = port;
if (!ifa) {
ret = EADDRNOTAVAIL;
cma_enable_remove(conn_id);
@@ -2952,7 +2956,6 @@
switch (cmd) {
case MOD_LOAD:
printf("Loading rdma_cma.\n");
-
cma_init();
break;
case MOD_QUIESCE:
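
The sin_port save/clear/restore around ifa_ifwithaddr() above is needed
because ifa_ifwithaddr() does an exact sockaddr compare and interface
addresses always carry sin_port == 0; with the listener's port left in
place the lookup never matches.  The pattern as a standalone sketch
(laddr is an illustrative struct sockaddr_in holding the local address):

    struct ifaddr *ifa;
    uint16_t port;

    port = laddr.sin_port;          /* remember the real port       */
    laddr.sin_port = 0;             /* ifaddrs have sin_port == 0   */
    ifa = ifa_ifwithaddr((struct sockaddr *)&laddr);
    laddr.sin_port = port;          /* put the port back            */
    if (ifa == NULL)
            return (EADDRNOTAVAIL); /* not one of our addresses     */
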
==== //depot/projects/iwarp/sys/contrib/rdma/rdma_iwcm.c#6 (text+ko) ====
==== //depot/projects/iwarp/sys/dev/cxgb/cxgb_adapter.h#5 (text+ko) ====
@@ -341,6 +341,8 @@
/* PCI register resources */
int regs_rid;
struct resource *regs_res;
+ int udbs_rid;
+ struct resource *udbs_res;
bus_space_handle_t bh;
bus_space_tag_t bt;
bus_size_t mmio_len;
==== //depot/projects/iwarp/sys/dev/cxgb/cxgb_main.c#5 (text+ko) ====
@@ -448,9 +448,16 @@
sc->regs_rid = PCIR_BAR(0);
if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&sc->regs_rid, RF_ACTIVE)) == NULL) {
- device_printf(dev, "Cannot allocate BAR\n");
+ device_printf(dev, "Cannot allocate BAR region 0\n");
return (ENXIO);
}
+ sc->udbs_rid = PCIR_BAR(2);
+ if ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &sc->udbs_rid, RF_ACTIVE)) == NULL) {
+ device_printf(dev, "Cannot allocate BAR region 1\n");
+ error = ENXIO;
+ goto out;
+ }
snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
device_get_unit(dev));
@@ -706,6 +713,10 @@
cxgb_offload_exit();
+ if (sc->udbs_res != NULL)
+ bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
+ sc->udbs_res);
+
if (sc->regs_res != NULL)
bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
sc->regs_res);
==== //depot/projects/iwarp/sys/dev/cxgb/cxgb_offload.c#6 (text+ko) ====
@@ -294,15 +294,15 @@
case RDMA_GET_PARAMS: {
struct rdma_info *req = data;
- req->udbell_physbase = rman_get_start(adapter->regs_res);
- req->udbell_len = rman_get_size(adapter->regs_res);
+ req->udbell_physbase = rman_get_start(adapter->udbs_res);
+ req->udbell_len = rman_get_size(adapter->udbs_res);
req->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
req->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
- req->kdb_addr = (void *)(rman_get_start(adapter->regs_res) + A_SG_KDOORBELL);
+ req->kdb_addr = (void *)((unsigned long)rman_get_virtual(adapter->regs_res) + A_SG_KDOORBELL);
req->pdev = adapter->dev;
break;
}
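
The two doorbell fixes above are about address spaces: udbell_physbase and
udbell_len describe the BAR2 window that user contexts will mmap(), so they
must come from the physical start and size of udbs_res, while kdb_addr is
dereferenced directly inside the kernel, so it has to be a kernel virtual
address, hence rman_get_virtual() on the register BAR plus the
A_SG_KDOORBELL offset.  A rough sketch of the kernel-side use (the value
encoding and the plain volatile store are illustrative only; the real
driver may go through writel()/bus_space routines):

    volatile uint32_t *kdb = req->kdb_addr;

    *kdb = doorbell_val;    /* 32-bit MMIO write to A_SG_KDOORBELL */
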
==== //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_cxio_hal.c#3 (text+ko) ====
==== //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_iwch_cm.c#3 (text+ko) ====
@@ -377,30 +377,16 @@
__be32 peer_ip, __be16 local_port,
__be16 peer_port, u8 tos)
{
-#ifdef netyet
- struct rtable *rt;
- struct flowi fl = {
- .oif = 0,
- .nl_u = {
- .ip4_u = {
- .daddr = peer_ip,
- .saddr = local_ip,
- .tos = tos}
- },
- .proto = IPPROTO_TCP,
- .uli_u = {
- .ports = {
- .sport = local_port,
- .dport = peer_port}
- }
- };
-
- if (ip_route_output_flow(&rt, &fl, NULL, 0))
- return NULL;
- return rt;
-
-#endif
- return NULL;
+ struct route iproute;
+ struct sockaddr_in *dst = (struct sockaddr_in *)&iproute.ro_dst;
+
+ bzero(&iproute, sizeof iproute);
+ dst->sin_family = AF_INET;
+ dst->sin_len = sizeof *dst;
+ dst->sin_addr.s_addr = peer_ip;
+
+ rtalloc(&iproute);
+ return iproute.ro_rt;
}
static unsigned int
@@ -879,13 +865,11 @@
err = (EINVAL);
goto err;
}
-#ifdef notyet
+
/*
* copy the new data into our accumulation buffer.
*/
- skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
- skb->len);
-#endif
+ m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
ep->mpa_pkt_len += m->m_len;
/*
@@ -897,10 +881,12 @@
/* Validate MPA header. */
if (mpa->revision != mpa_rev) {
+ PDBG("%s bad mpa rev %d\n", __FUNCTION__, mpa->revision);
err = EPROTO;
goto err;
}
if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
+ PDBG("%s bad mpa key |%16s|\n", __FUNCTION__, mpa->key);
err = EPROTO;
goto err;
}
@@ -911,6 +897,7 @@
* Fail if there's too much private data.
*/
if (plen > MPA_MAX_PRIVATE_DATA) {
+ PDBG("%s plen too big %d\n", __FUNCTION__, plen);
err = EPROTO;
goto err;
}
@@ -919,6 +906,7 @@
* If plen does not account for pkt size
*/
if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
+ PDBG("%s pkt too big %d\n", __FUNCTION__, ep->mpa_pkt_len);
err = EPROTO;
goto err;
}
@@ -942,6 +930,7 @@
* start reply message including private data. And
* the MPA header is valid.
*/
+ PDBG("%s mpa rpl looks good!\n", __FUNCTION__);
state_set(&ep->com, FPDU_MODE);
ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
ep->mpa_attr.recv_marker_enabled = markers_enabled;
@@ -996,39 +985,39 @@
* then we must fail this connection.
*/
if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {
+ PDBG("%s mpa message too big %d\n", __FUNCTION__, ep->mpa_pkt_len + m->m_len);
abort_connection(ep, m, M_NOWAIT);
return;
}
- PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
-#ifdef notyet
/*
* Copy the new data into our accumulation buffer.
*/
- skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
- skb->len);
-#endif
+ m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
ep->mpa_pkt_len += m->m_len;
/*
* If we don't even have the mpa message, then bail.
* We'll continue process when more data arrives.
*/
- if (ep->mpa_pkt_len < sizeof(*mpa))
+ if (ep->mpa_pkt_len < sizeof(*mpa)) {
+ PDBG("%s not enough header %d...waiting...\n", __FUNCTION__, ep->mpa_pkt_len);
return;
- PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+ }
mpa = (struct mpa_message *) ep->mpa_pkt;
/*
* Validate MPA Header.
*/
if (mpa->revision != mpa_rev) {
+ PDBG("%s bad mpa rev %d\n", __FUNCTION__, mpa->revision);
abort_connection(ep, m, M_NOWAIT);
return;
}
if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
+ PDBG("%s bad mpa key |%16s|\n", __FUNCTION__, mpa->key);
abort_connection(ep, m, M_NOWAIT);
return;
}
@@ -1039,6 +1028,7 @@
* Fail if there's too much private data.
*/
if (plen > MPA_MAX_PRIVATE_DATA) {
+ PDBG("%s plen too big %d\n", __FUNCTION__, plen);
abort_connection(ep, m, M_NOWAIT);
return;
}
@@ -1047,6 +1037,7 @@
* If plen does not account for pkt size
*/
if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
+ PDBG("%s more data after private data %d\n", __FUNCTION__, ep->mpa_pkt_len);
abort_connection(ep, m, M_NOWAIT);
return;
}
@@ -1055,8 +1046,10 @@
/*
* If we don't have all the pdata yet, then bail.
*/
- if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
+ if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {
+ PDBG("%s more mpa msg to come %d\n", __FUNCTION__, ep->mpa_pkt_len);
return;
+ }
/*
* If we get here we have accumulated the entire mpa
@@ -1400,6 +1393,7 @@
printf("%s - failed to find route !\n", __FUNCTION__);
goto reject;
}
+ printf("%s ifname |%s|\n", __FUNCTION__, dst->rt_ifp->if_xname);
l2t = t3_l2t_get(tdev, dst, dst->rt_ifp);
if (l2t == NULL) {
printf("%s - failed to allocate l2t entry!\n", __FUNCTION__);
@@ -1417,9 +1411,11 @@
state_set(&child_ep->com, CONNECTING);
child_ep->com.tdev = tdev;
child_ep->com.cm_id = NULL;
+ child_ep->com.local_addr.sin_len = sizeof(struct sockaddr_in);
child_ep->com.local_addr.sin_family = PF_INET;
child_ep->com.local_addr.sin_port = req->local_port;
child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
+ child_ep->com.remote_addr.sin_len = sizeof(struct sockaddr_in);
child_ep->com.remote_addr.sin_family = PF_INET;
child_ep->com.remote_addr.sin_port = req->peer_port;
child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
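
The skb_copy_from_linear_data() stubs are replaced with m_copydata(), which
flattens the (possibly chained) mbuf into the endpoint's fixed accumulation
buffer; the MPA request/reply is then parsed once enough bytes have
arrived.  The accumulation pattern, as a hedged sketch with illustrative
return values:

    /* Too much MPA data for the buffer: caller aborts the connection. */
    if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt))
            return (EOVERFLOW);

    /* Append this mbuf's payload to what we have so far. */
    m_copydata(m, 0, m->m_len, (caddr_t)&ep->mpa_pkt[ep->mpa_pkt_len]);
    ep->mpa_pkt_len += m->m_len;

    /* Not even a full MPA header yet: wait for more data. */
    if (ep->mpa_pkt_len < sizeof(struct mpa_message))
            return (EAGAIN);
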
==== //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_iwch_provider.c#3 (text+ko) ====
@@ -97,7 +97,7 @@
static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
struct ib_ah_attr *ah_attr)
{
- return ERR_PTR(ENOSYS);
+ return ERR_PTR(-ENOSYS);
}
static int iwch_ah_destroy(struct ib_ah *ah)
@@ -150,7 +150,7 @@
PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
context = kzalloc(sizeof(*context), M_WAITOK);
if (!context)
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
cxio_init_ucontext(&rhp->rdev, &context->uctx);
TAILQ_INIT(&context->mmaps);
mtx_init(&context->mmap_lock, "ucontext mmap", NULL, MTX_DEF);
@@ -186,15 +186,15 @@
PDBG("%s ib_dev %p entries %d\n", __FUNCTION__, ibdev, entries);
rhp = to_iwch_dev(ibdev);
chp = kzalloc(sizeof(*chp), M_WAITOK);
- if (!chp)
- return ERR_PTR(ENOMEM);
-
+ if (!chp) {
+ return ERR_PTR(-ENOMEM);
+ }
if (ib_context) {
ucontext = to_iwch_ucontext(ib_context);
if (!t3a_device(rhp)) {
if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
cxfree(chp);
- return ERR_PTR(EFAULT);
+ return ERR_PTR(-EFAULT);
}
chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
}
@@ -218,7 +218,7 @@
if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
cxfree(chp);
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
}
chp->rhp = rhp;
chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;
@@ -233,7 +233,7 @@
mm = kmalloc(sizeof *mm, M_WAITOK);
if (!mm) {
iwch_destroy_cq(&chp->ibcq);
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
}
uresp.cqid = chp->cq.cqid;
uresp.size_log2 = chp->cq.size_log2;
@@ -244,7 +244,7 @@
if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
cxfree(mm);
iwch_destroy_cq(&chp->ibcq);
- return ERR_PTR(EFAULT);
+ return ERR_PTR(-EFAULT);
}
mm->key = uresp.key;
mm->addr = virt_to_phys(chp->cq.queue);
@@ -444,18 +444,18 @@
rhp = (struct iwch_dev *) ibdev;
pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
if (!pdid)
- return ERR_PTR(EINVAL);
+ return ERR_PTR(-EINVAL);
php = kzalloc(sizeof(*php), M_WAITOK);
if (!php) {
cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
}
php->pdid = pdid;
php->rhp = rhp;
if (context) {
if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
iwch_deallocate_pd(&php->ibpd);
- return ERR_PTR(EFAULT);
+ return ERR_PTR(-EFAULT);
}
}
PDBG("%s pdid 0x%0x ptr 0x%p\n", __FUNCTION__, pdid, php);
@@ -509,7 +509,7 @@
mhp = kzalloc(sizeof(*mhp), M_WAITOK);
if (!mhp)
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
/* First check that we have enough alignment */
if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
@@ -546,7 +546,7 @@
return &mhp->ibmr;
err:
cxfree(mhp);
- return ERR_PTR(ret);
+ return ERR_PTR(-ret);
}
@@ -636,13 +636,13 @@
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), M_WAITOK);
if (!mhp)
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc);
if (IS_ERR(mhp->umem)) {
err = PTR_ERR(mhp->umem);
cxfree(mhp);
- return ERR_PTR(err);
+ return ERR_PTR(-err);
}
shift = ffs(mhp->umem->page_size) - 1;
@@ -701,7 +701,7 @@
err:
ib_umem_release(mhp->umem);
cxfree(mhp);
- return ERR_PTR(err);
+ return ERR_PTR(-err);
}
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
@@ -735,11 +735,11 @@
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), M_WAITOK);
if (!mhp)
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
if (ret) {
cxfree(mhp);
- return ERR_PTR(ret);
+ return ERR_PTR(-ret);
}
mhp->rhp = rhp;
mhp->attr.pdid = php->pdid;
@@ -812,13 +812,13 @@
PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
if (attrs->qp_type != IB_QPT_RC)
- return ERR_PTR(EINVAL);
+ return ERR_PTR(-EINVAL);
php = to_iwch_pd(pd);
rhp = php->rhp;
schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
if (!schp || !rchp)
- return ERR_PTR(EINVAL);
+ return ERR_PTR(-EINVAL);
/* The RQT size must be # of entries + 1 rounded up to a power of two */
rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
@@ -830,10 +830,10 @@
rqsize = 16;
if (rqsize > T3_MAX_RQ_SIZE)
- return ERR_PTR(EINVAL);
+ return ERR_PTR(-EINVAL);
if (attrs->cap.max_inline_data > T3_MAX_INLINE)
- return ERR_PTR(EINVAL);
+ return ERR_PTR(-EINVAL);
/*
* NOTE: The SQ and total WQ sizes don't need to be
@@ -846,7 +846,7 @@
wqsize, sqsize, rqsize);
qhp = kzalloc(sizeof(*qhp), M_WAITOK);
if (!qhp)
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
qhp->wq.size_log2 = ilog2(wqsize);
qhp->wq.rq_size_log2 = ilog2(rqsize);
qhp->wq.sq_size_log2 = ilog2(sqsize);
@@ -854,7 +854,7 @@
if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
cxfree(qhp);
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
}
attrs->cap.max_recv_wr = rqsize - 1;
attrs->cap.max_send_wr = sqsize;
@@ -893,14 +893,14 @@
mm1 = kmalloc(sizeof *mm1, M_WAITOK);
if (!mm1) {
iwch_destroy_qp(&qhp->ibqp);
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
}
mm2 = kmalloc(sizeof *mm2, M_WAITOK);
if (!mm2) {
cxfree(mm1);
iwch_destroy_qp(&qhp->ibqp);
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
}
uresp.qpid = qhp->wq.qpid;
@@ -917,7 +917,7 @@
cxfree(mm1);
cxfree(mm2);
iwch_destroy_qp(&qhp->ibqp);
- return ERR_PTR(EFAULT);
+ return ERR_PTR(-EFAULT);
}
mm1->key = uresp.key;
mm1->addr = virt_to_phys(qhp->wq.queue);
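
All of the ERR_PTR() returns now carry a negative errno.  That matches the
convention the compat layer's IS_ERR()/PTR_ERR() are assumed to follow
(same as Linux): errors are encoded as pointers in the top MAX_ERRNO bytes
of the address space, so the errno must be negated before encoding.  A
hedged sketch of those definitions (the real ones live in the linux compat
headers and may differ in detail):

    #define MAX_ERRNO       4095
    #define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)

    static inline void *ERR_PTR(long error)      { return (void *)error; }
    static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int   IS_ERR(const void *ptr)
    {
            return IS_ERR_VALUE((unsigned long)ptr);
    }
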
==== //depot/projects/iwarp/sys/sys/linux_compat.h#8 (text+ko) ====
@@ -106,18 +106,23 @@
static inline void *idr_find(struct idr *x, uint32_t key)
{
struct idr *i;
- for (i=x;i;i=i->next) if (i->key==key) return(i->value);
+ for (i=x->next;i;i=i->next) if (i->key==key) return(i->value);
return(0);
}
static inline int idr_pre_get(struct idr *idp, unsigned int gfp){return(1);}
-static inline int idr_get_new(struct idr **idp, void *ptr, int *id)
+static inline int idr_get_new(struct idr *idp, void *ptr, int *id)
{
- struct idr *i=malloc(sizeof(struct idr),M_TEMP,M_WAITOK);
+ struct idr *i;
+
+ for (i=idp->next;i;i=i->next)
+ if (i->key == *id)
+ return -EEXIST;
+ i=malloc(sizeof(struct idr),M_TEMP,M_WAITOK);
i->key=*id;
i->value=ptr;
- i->next=*idp;
- *idp=i;
+ i->next = idp->next;
+ idp->next = i;
return(0);
}
@@ -126,14 +131,14 @@
int newid = starting_id;
struct idr *i;
- for (i=idp;i;i=((i)->next))
+ for (i=idp->next;i;i=i->next)
if (i->key > newid)
newid = i->key;
i=malloc(sizeof(struct idr),M_TEMP,M_WAITOK);
i->key=newid;
i->value=ptr;
- i->next=idp;
- idp=i;
+ i->next=idp->next;
+ idp->next=i;
*id = newid;
return(0);
}
@@ -141,22 +146,30 @@
static inline void idr_remove(struct idr *idp, int id)
{
/* leak */
- struct idr *i;
- for (i=idp;i;i=((i)->next))
+ struct idr *i, *prev=NULL;
+ for (i=idp->next;i;prev=i,i=i->next)
if ((i)->key==id) {
- i=(i)->next;
+ if (!prev)
+ idp->next = i->next;
+ else
+ prev->next = i->next;
+ free(i, M_TEMP);
return;
}
}
-static inline void idr_init(struct idr *idp){}
+static inline void idr_init(struct idr *idp)
+{
+ idp->next = NULL;
+}
static inline void idr_destroy(struct idr *idp)
{
struct idr *i, *tmp;
for (i=idp->next;i;i=tmp) {
- tmp=(i)->next;
+ tmp=i->next;
free(i, M_TEMP);
}
+ idp->next = NULL;
}
static __inline void
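
With these changes the idr shim treats the caller's struct idr purely as a
list head: entries hang off idp->next, idr_get_new() now rejects duplicate
keys, and idr_remove() actually unlinks and frees the node.  Expected
usage, sketched with illustrative names (qpidr, my_obj, qpid):

    struct idr qpidr;
    int qpid = 42;

    idr_init(&qpidr);                        /* idp->next = NULL        */
    if (idr_get_new(&qpidr, my_obj, &qpid))  /* -EEXIST on duplicate id */
            return (EBUSY);
    KASSERT(idr_find(&qpidr, qpid) == my_obj, ("idr lookup mismatch"));
    idr_remove(&qpidr, qpid);                /* unlink + free the node  */
    idr_destroy(&qpidr);
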
==== //depot/projects/iwarp/sys/sys/linux_pci.h#2 (text+ko) ====
@@ -232,7 +232,11 @@
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, int flag)
{
- return(pci_alloc_consistent(0,size,dma_handle));
+ void *r;
+
+ r = contigmalloc(size, M_DEVBUF, M_WAITOK, 0ul, ~0ul, 4096, 0);
+ if (r) *dma_handle=virt_to_phys(r);
+ return(r);
}
static inline void dma_free_coherent(struct device **dev, size_t size,
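
dma_alloc_coherent() now allocates through contigmalloc() (4k-aligned, no
boundary restriction) and reports the physical address via virt_to_phys().
For reference, a hedged sketch of the free path that would pair with it
(not part of this diff): memory obtained from contigmalloc() is released
with contigfree(), e.g.

    contigfree(vaddr, size, M_DEVBUF);

inside dma_free_coherent().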