sparc: Convert all SBUS drivers to dma_*() interfaces.

And all the SBUS dma interfaces are deleted.

A private implementation remains inside the 32-bit sparc port; it
exists only to implement the dma_*() interfaces.
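
The conversion itself is mechanical.  Roughly, per driver (an
illustrative sketch, not lifted verbatim from any single file):

	/* Before: SBUS-private DMA API with SBUS_DMA_* directions. */
	block = sbus_alloc_consistent(&sdev->ofdev.dev, PAGE_SIZE, &dvma);
	addr  = sbus_map_single(&sdev->ofdev.dev, skb->data, len,
				SBUS_DMA_TODEVICE);

	/* After: generic DMA API on the same struct device, with an
	 * explicit GFP flag and enum dma_data_direction values.
	 */
	block = dma_alloc_coherent(&sdev->ofdev.dev, PAGE_SIZE, &dvma,
				   GFP_ATOMIC);
	addr  = dma_map_single(&sdev->ofdev.dev, skb->data, len,
				DMA_TO_DEVICE);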

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index c174621..858880b 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -22,6 +22,7 @@
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 #include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 
 #include <net/dst.h>
 #include <net/arp.h>
@@ -243,8 +244,8 @@
 			u32 dma_addr;
 
 			dma_addr = sbus_readl(&rxd->myri_scatters[0].addr);
-			sbus_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr,
-					  RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE);
+			dma_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr,
+					 RX_ALLOC_SIZE, DMA_FROM_DEVICE);
 			dev_kfree_skb(mp->rx_skbs[i]);
 			mp->rx_skbs[i] = NULL;
 		}
@@ -260,9 +261,9 @@
 			u32 dma_addr;
 
 			dma_addr = sbus_readl(&txd->myri_gathers[0].addr);
-			sbus_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr,
-					  (skb->len + 3) & ~3,
-					  SBUS_DMA_TODEVICE);
+			dma_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr,
+					 (skb->len + 3) & ~3,
+					 DMA_TO_DEVICE);
 			dev_kfree_skb(mp->tx_skbs[i]);
 			mp->tx_skbs[i] = NULL;
 		}
@@ -291,9 +292,9 @@
 		skb->dev = dev;
 		skb_put(skb, RX_ALLOC_SIZE);
 
-		dma_addr = sbus_map_single(&mp->myri_sdev->ofdev.dev,
-					   skb->data, RX_ALLOC_SIZE,
-					   SBUS_DMA_FROMDEVICE);
+		dma_addr = dma_map_single(&mp->myri_sdev->ofdev.dev,
+					  skb->data, RX_ALLOC_SIZE,
+					  DMA_FROM_DEVICE);
 		sbus_writel(dma_addr, &rxd[i].myri_scatters[0].addr);
 		sbus_writel(RX_ALLOC_SIZE, &rxd[i].myri_scatters[0].len);
 		sbus_writel(i, &rxd[i].ctx);
@@ -349,8 +350,8 @@
 
 		DTX(("SKB[%d] ", entry));
 		dma_addr = sbus_readl(&sq->myri_txd[entry].myri_gathers[0].addr);
-		sbus_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr,
-				  skb->len, SBUS_DMA_TODEVICE);
+		dma_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr,
+				 skb->len, DMA_TO_DEVICE);
 		dev_kfree_skb(skb);
 		mp->tx_skbs[entry] = NULL;
 		dev->stats.tx_packets++;
@@ -429,9 +430,9 @@
 
 		/* Check for errors. */
 		DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
-		sbus_dma_sync_single_for_cpu(&mp->myri_sdev->ofdev.dev,
-					     sbus_readl(&rxd->myri_scatters[0].addr),
-					     RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE);
+		dma_sync_single_for_cpu(&mp->myri_sdev->ofdev.dev,
+					sbus_readl(&rxd->myri_scatters[0].addr),
+					RX_ALLOC_SIZE, DMA_FROM_DEVICE);
 		if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
 			DRX(("ERROR["));
 			dev->stats.rx_errors++;
@@ -448,10 +449,10 @@
 			drops++;
 			DRX(("DROP "));
 			dev->stats.rx_dropped++;
-			sbus_dma_sync_single_for_device(&mp->myri_sdev->ofdev.dev,
-							sbus_readl(&rxd->myri_scatters[0].addr),
-							RX_ALLOC_SIZE,
-							SBUS_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&mp->myri_sdev->ofdev.dev,
+						   sbus_readl(&rxd->myri_scatters[0].addr),
+						   RX_ALLOC_SIZE,
+						   DMA_FROM_DEVICE);
 			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
 			sbus_writel(index, &rxd->ctx);
 			sbus_writel(1, &rxd->num_sg);
@@ -470,17 +471,17 @@
 				DRX(("skb_alloc(FAILED) "));
 				goto drop_it;
 			}
-			sbus_unmap_single(&mp->myri_sdev->ofdev.dev,
-					  sbus_readl(&rxd->myri_scatters[0].addr),
-					  RX_ALLOC_SIZE,
-					  SBUS_DMA_FROMDEVICE);
+			dma_unmap_single(&mp->myri_sdev->ofdev.dev,
+					 sbus_readl(&rxd->myri_scatters[0].addr),
+					 RX_ALLOC_SIZE,
+					 DMA_FROM_DEVICE);
 			mp->rx_skbs[index] = new_skb;
 			new_skb->dev = dev;
 			skb_put(new_skb, RX_ALLOC_SIZE);
-			dma_addr = sbus_map_single(&mp->myri_sdev->ofdev.dev,
-						   new_skb->data,
-						   RX_ALLOC_SIZE,
-						   SBUS_DMA_FROMDEVICE);
+			dma_addr = dma_map_single(&mp->myri_sdev->ofdev.dev,
+						  new_skb->data,
+						  RX_ALLOC_SIZE,
+						  DMA_FROM_DEVICE);
 			sbus_writel(dma_addr, &rxd->myri_scatters[0].addr);
 			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
 			sbus_writel(index, &rxd->ctx);
@@ -506,10 +507,10 @@
 
 			/* Reuse original ring buffer. */
 			DRX(("reuse "));
-			sbus_dma_sync_single_for_device(&mp->myri_sdev->ofdev.dev,
-							sbus_readl(&rxd->myri_scatters[0].addr),
-							RX_ALLOC_SIZE,
-							SBUS_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&mp->myri_sdev->ofdev.dev,
+						   sbus_readl(&rxd->myri_scatters[0].addr),
+						   RX_ALLOC_SIZE,
+						   DMA_FROM_DEVICE);
 			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
 			sbus_writel(index, &rxd->ctx);
 			sbus_writel(1, &rxd->num_sg);
@@ -658,8 +659,8 @@
 		sbus_writew((skb->data[4] << 8) | skb->data[5], &txd->addr[3]);
 	}
 
-	dma_addr = sbus_map_single(&mp->myri_sdev->ofdev.dev, skb->data,
-				   len, SBUS_DMA_TODEVICE);
+	dma_addr = dma_map_single(&mp->myri_sdev->ofdev.dev, skb->data,
+				  len, DMA_TO_DEVICE);
 	sbus_writel(dma_addr, &txd->myri_gathers[0].addr);
 	sbus_writel(len, &txd->myri_gathers[0].len);
 	sbus_writel(1, &txd->num_sg);
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index b92218c..8fe4c49 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -23,6 +23,7 @@
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 #include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/auxio.h>
 #include <asm/byteorder.h>
@@ -239,9 +240,10 @@
 		skb_reserve(skb, 34);
 
 		bb->be_rxd[i].rx_addr =
-			sbus_map_single(&bp->bigmac_sdev->ofdev.dev, skb->data,
-					RX_BUF_ALLOC_SIZE - 34,
-					SBUS_DMA_FROMDEVICE);
+			dma_map_single(&bp->bigmac_sdev->ofdev.dev,
+				       skb->data,
+				       RX_BUF_ALLOC_SIZE - 34,
+				       DMA_FROM_DEVICE);
 		bb->be_rxd[i].rx_flags =
 			(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
 	}
@@ -776,9 +778,9 @@
 		skb = bp->tx_skbs[elem];
 		bp->enet_stats.tx_packets++;
 		bp->enet_stats.tx_bytes += skb->len;
-		sbus_unmap_single(&bp->bigmac_sdev->ofdev.dev,
-				  this->tx_addr, skb->len,
-				  SBUS_DMA_TODEVICE);
+		dma_unmap_single(&bp->bigmac_sdev->ofdev.dev,
+				 this->tx_addr, skb->len,
+				 DMA_TO_DEVICE);
 
 		DTX(("skb(%p) ", skb));
 		bp->tx_skbs[elem] = NULL;
@@ -831,19 +833,19 @@
 				drops++;
 				goto drop_it;
 			}
-			sbus_unmap_single(&bp->bigmac_sdev->ofdev.dev,
-					  this->rx_addr,
-					  RX_BUF_ALLOC_SIZE - 34,
-					  SBUS_DMA_FROMDEVICE);
+			dma_unmap_single(&bp->bigmac_sdev->ofdev.dev,
+					 this->rx_addr,
+					 RX_BUF_ALLOC_SIZE - 34,
+					 DMA_FROM_DEVICE);
 			bp->rx_skbs[elem] = new_skb;
 			new_skb->dev = bp->dev;
 			skb_put(new_skb, ETH_FRAME_LEN);
 			skb_reserve(new_skb, 34);
 			this->rx_addr =
-				sbus_map_single(&bp->bigmac_sdev->ofdev.dev,
-						new_skb->data,
-						RX_BUF_ALLOC_SIZE - 34,
-						SBUS_DMA_FROMDEVICE);
+				dma_map_single(&bp->bigmac_sdev->ofdev.dev,
+					       new_skb->data,
+					       RX_BUF_ALLOC_SIZE - 34,
+					       DMA_FROM_DEVICE);
 			this->rx_flags =
 				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
 
@@ -858,13 +860,13 @@
 			}
 			skb_reserve(copy_skb, 2);
 			skb_put(copy_skb, len);
-			sbus_dma_sync_single_for_cpu(&bp->bigmac_sdev->ofdev.dev,
-						     this->rx_addr, len,
-						     SBUS_DMA_FROMDEVICE);
+			dma_sync_single_for_cpu(&bp->bigmac_sdev->ofdev.dev,
+						this->rx_addr, len,
+						DMA_FROM_DEVICE);
 			skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
-			sbus_dma_sync_single_for_device(&bp->bigmac_sdev->ofdev.dev,
-							this->rx_addr, len,
-							SBUS_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&bp->bigmac_sdev->ofdev.dev,
+						   this->rx_addr, len,
+						   DMA_FROM_DEVICE);
 
 			/* Reuse original ring buffer. */
 			this->rx_flags =
@@ -960,8 +962,8 @@
 	u32 mapping;
 
 	len = skb->len;
-	mapping = sbus_map_single(&bp->bigmac_sdev->ofdev.dev, skb->data,
-				  len, SBUS_DMA_TODEVICE);
+	mapping = dma_map_single(&bp->bigmac_sdev->ofdev.dev, skb->data,
+				 len, DMA_TO_DEVICE);
 
 	/* Avoid a race... */
 	spin_lock_irq(&bp->lock);
@@ -1185,9 +1187,9 @@
 	bigmac_stop(bp);
 
 	/* Allocate transmit/receive descriptor DVMA block. */
-	bp->bmac_block = sbus_alloc_consistent(&bp->bigmac_sdev->ofdev.dev,
-					       PAGE_SIZE,
-					       &bp->bblock_dvma);
+	bp->bmac_block = dma_alloc_coherent(&bp->bigmac_sdev->ofdev.dev,
+					    PAGE_SIZE,
+					    &bp->bblock_dvma, GFP_ATOMIC);
 	if (bp->bmac_block == NULL || bp->bblock_dvma == 0) {
 		printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n");
 		goto fail_and_cleanup;
@@ -1247,10 +1249,10 @@
 		sbus_iounmap(bp->tregs, TCVR_REG_SIZE);
 
 	if (bp->bmac_block)
-		sbus_free_consistent(&bp->bigmac_sdev->ofdev.dev,
-				     PAGE_SIZE,
-				     bp->bmac_block,
-				     bp->bblock_dvma);
+		dma_free_coherent(&bp->bigmac_sdev->ofdev.dev,
+				  PAGE_SIZE,
+				  bp->bmac_block,
+				  bp->bblock_dvma);
 
 	/* This also frees the co-located 'dev->priv' */
 	free_netdev(dev);
@@ -1282,10 +1284,10 @@
 	sbus_iounmap(bp->creg, CREG_REG_SIZE);
 	sbus_iounmap(bp->bregs, BMAC_REG_SIZE);
 	sbus_iounmap(bp->tregs, TCVR_REG_SIZE);
-	sbus_free_consistent(&bp->bigmac_sdev->ofdev.dev,
-			     PAGE_SIZE,
-			     bp->bmac_block,
-			     bp->bblock_dvma);
+	dma_free_coherent(&bp->bigmac_sdev->ofdev.dev,
+			  PAGE_SIZE,
+			  bp->bmac_block,
+			  bp->bblock_dvma);
 
 	free_netdev(net_dev);
 
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index cd93fc5..69cc771 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -34,6 +34,7 @@
 #include <linux/skbuff.h>
 #include <linux/mm.h>
 #include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -277,13 +278,13 @@
 } while(0)
 #define hme_read_desc32(__hp, __p)	((__force u32)(hme32)*(__p))
 #define hme_dma_map(__hp, __ptr, __size, __dir) \
-	sbus_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
+	dma_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
 #define hme_dma_unmap(__hp, __addr, __size, __dir) \
-	sbus_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
+	dma_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
 #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
-	sbus_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
+	dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
 #define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
-	sbus_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
+	dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
 #else
 /* PCI only compilation */
 #define hme_write32(__hp, __reg, __val) \
@@ -316,25 +317,6 @@
 #endif
 
 
-#ifdef SBUS_DMA_BIDIRECTIONAL
-#	define DMA_BIDIRECTIONAL	SBUS_DMA_BIDIRECTIONAL
-#else
-#	define DMA_BIDIRECTIONAL	0
-#endif
-
-#ifdef SBUS_DMA_FROMDEVICE
-#	define DMA_FROMDEVICE		SBUS_DMA_FROMDEVICE
-#else
-#	define DMA_FROMDEVICE		2
-#endif
-
-#ifdef SBUS_DMA_TODEVICE
-#	define DMA_TODEVICE		SBUS_DMA_TODEVICE
-#else
-#	define DMA_TODEVICE		1
-#endif
-
-
 /* Oh yes, the MIF BitBang is mighty fun to program.  BitBucket is more like it. */
 static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit)
 {
@@ -1224,7 +1206,7 @@
 
 			rxd = &hp->happy_block->happy_meal_rxd[i];
 			dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
-			hme_dma_unmap(hp, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE);
+			hme_dma_unmap(hp, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
 			dev_kfree_skb_any(skb);
 			hp->rx_skbs[i] = NULL;
 		}
@@ -1245,7 +1227,7 @@
 				hme_dma_unmap(hp, dma_addr,
 					      (hme_read_desc32(hp, &txd->tx_flags)
 					       & TXFLAG_SIZE),
-					      DMA_TODEVICE);
+					      DMA_TO_DEVICE);
 
 				if (frag != skb_shinfo(skb)->nr_frags)
 					i++;
@@ -1287,7 +1269,7 @@
 		skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
 		hme_write_rxd(hp, &hb->happy_meal_rxd[i],
 			      (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
-			      hme_dma_map(hp, skb->data, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE));
+			      hme_dma_map(hp, skb->data, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE));
 		skb_reserve(skb, RX_OFFSET);
 	}
 
@@ -1966,7 +1948,7 @@
 			dma_len = hme_read_desc32(hp, &this->tx_flags);
 
 			dma_len &= TXFLAG_SIZE;
-			hme_dma_unmap(hp, dma_addr, dma_len, DMA_TODEVICE);
+			hme_dma_unmap(hp, dma_addr, dma_len, DMA_TO_DEVICE);
 
 			elem = NEXT_TX(elem);
 			this = &txbase[elem];
@@ -2044,13 +2026,13 @@
 				drops++;
 				goto drop_it;
 			}
-			hme_dma_unmap(hp, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE);
+			hme_dma_unmap(hp, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
 			hp->rx_skbs[elem] = new_skb;
 			new_skb->dev = dev;
 			skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
 			hme_write_rxd(hp, this,
 				      (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
-				      hme_dma_map(hp, new_skb->data, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE));
+				      hme_dma_map(hp, new_skb->data, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE));
 			skb_reserve(new_skb, RX_OFFSET);
 
 			/* Trim the original skb for the netif. */
@@ -2065,9 +2047,9 @@
 
 			skb_reserve(copy_skb, 2);
 			skb_put(copy_skb, len);
-			hme_dma_sync_for_cpu(hp, dma_addr, len, DMA_FROMDEVICE);
+			hme_dma_sync_for_cpu(hp, dma_addr, len, DMA_FROM_DEVICE);
 			skb_copy_from_linear_data(skb, copy_skb->data, len);
-			hme_dma_sync_for_device(hp, dma_addr, len, DMA_FROMDEVICE);
+			hme_dma_sync_for_device(hp, dma_addr, len, DMA_FROM_DEVICE);
 
 			/* Reuse original ring buffer. */
 			hme_write_rxd(hp, this,
@@ -2300,7 +2282,7 @@
 		u32 mapping, len;
 
 		len = skb->len;
-		mapping = hme_dma_map(hp, skb->data, len, DMA_TODEVICE);
+		mapping = hme_dma_map(hp, skb->data, len, DMA_TO_DEVICE);
 		tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
 		hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
 			      (tx_flags | (len & TXFLAG_SIZE)),
@@ -2314,7 +2296,7 @@
 		 * Otherwise we could race with the device.
 		 */
 		first_len = skb_headlen(skb);
-		first_mapping = hme_dma_map(hp, skb->data, first_len, DMA_TODEVICE);
+		first_mapping = hme_dma_map(hp, skb->data, first_len, DMA_TO_DEVICE);
 		entry = NEXT_TX(entry);
 
 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -2325,7 +2307,7 @@
 			mapping = hme_dma_map(hp,
 					      ((void *) page_address(this_frag->page) +
 					       this_frag->page_offset),
-					      len, DMA_TODEVICE);
+					      len, DMA_TO_DEVICE);
 			this_txflags = tx_flags;
 			if (frag == skb_shinfo(skb)->nr_frags - 1)
 				this_txflags |= TXFLAG_EOP;
@@ -2786,9 +2768,10 @@
 	hp->happy_bursts = of_getintprop_default(sdev->bus->ofdev.node,
 						 "burst-sizes", 0x00);
 
-	hp->happy_block = sbus_alloc_consistent(hp->dma_dev,
-						PAGE_SIZE,
-						&hp->hblock_dvma);
+	hp->happy_block = dma_alloc_coherent(hp->dma_dev,
+					     PAGE_SIZE,
+					     &hp->hblock_dvma,
+					     GFP_ATOMIC);
 	err = -ENOMEM;
 	if (!hp->happy_block) {
 		printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n");
@@ -2824,12 +2807,12 @@
 	hp->read_desc32 = sbus_hme_read_desc32;
 	hp->write_txd = sbus_hme_write_txd;
 	hp->write_rxd = sbus_hme_write_rxd;
-	hp->dma_map = (u32 (*)(void *, void *, long, int))sbus_map_single;
-	hp->dma_unmap = (void (*)(void *, u32, long, int))sbus_unmap_single;
+	hp->dma_map = (u32 (*)(void *, void *, long, int))dma_map_single;
+	hp->dma_unmap = (void (*)(void *, u32, long, int))dma_unmap_single;
 	hp->dma_sync_for_cpu = (void (*)(void *, u32, long, int))
-		sbus_dma_sync_single_for_cpu;
+		dma_sync_single_for_cpu;
 	hp->dma_sync_for_device = (void (*)(void *, u32, long, int))
-		sbus_dma_sync_single_for_device;
+		dma_sync_single_for_device;
 	hp->read32 = sbus_hme_read32;
 	hp->write32 = sbus_hme_write32;
 #endif
@@ -2844,7 +2827,7 @@
 	if (register_netdev(hp->dev)) {
 		printk(KERN_ERR "happymeal: Cannot register net device, "
 		       "aborting.\n");
-		goto err_out_free_consistent;
+		goto err_out_free_coherent;
 	}
 
 	dev_set_drvdata(&sdev->ofdev.dev, hp);
@@ -2860,11 +2843,11 @@
 
 	return 0;
 
-err_out_free_consistent:
-	sbus_free_consistent(hp->dma_dev,
-			     PAGE_SIZE,
-			     hp->happy_block,
-			     hp->hblock_dvma);
+err_out_free_coherent:
+	dma_free_coherent(hp->dma_dev,
+			  PAGE_SIZE,
+			  hp->happy_block,
+			  hp->hblock_dvma);
 
 err_out_iounmap:
 	if (hp->gregs)
@@ -3308,10 +3291,10 @@
 	sbus_iounmap(hp->erxregs, ERX_REG_SIZE);
 	sbus_iounmap(hp->bigmacregs, BMAC_REG_SIZE);
 	sbus_iounmap(hp->tcvregs, TCVR_REG_SIZE);
-	sbus_free_consistent(hp->dma_dev,
-			     PAGE_SIZE,
-			     hp->happy_block,
-			     hp->hblock_dvma);
+	dma_free_coherent(hp->dma_dev,
+			  PAGE_SIZE,
+			  hp->happy_block,
+			  hp->hblock_dvma);
 
 	free_netdev(net_dev);
 
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 4f4baf9..6575888 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -91,6 +91,7 @@
 #include <linux/skbuff.h>
 #include <linux/ethtool.h>
 #include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -1283,10 +1284,10 @@
 		sbus_iounmap(lp->init_block_iomem,
 			     sizeof(struct lance_init_block));
 	} else if (lp->init_block_mem) {
-		sbus_free_consistent(&lp->sdev->ofdev.dev,
-				     sizeof(struct lance_init_block),
-				     lp->init_block_mem,
-				     lp->init_block_dvma);
+		dma_free_coherent(&lp->sdev->ofdev.dev,
+				  sizeof(struct lance_init_block),
+				  lp->init_block_mem,
+				  lp->init_block_dvma);
 	}
 }
 
@@ -1384,9 +1385,9 @@
 		lp->tx = lance_tx_pio;
 	} else {
 		lp->init_block_mem =
-			sbus_alloc_consistent(&sdev->ofdev.dev,
-					      sizeof(struct lance_init_block),
-					      &lp->init_block_dvma);
+			dma_alloc_coherent(&sdev->ofdev.dev,
+					   sizeof(struct lance_init_block),
+					   &lp->init_block_dvma, GFP_ATOMIC);
 		if (!lp->init_block_mem || lp->init_block_dvma == 0) {
 			printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n");
 			goto fail;
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index ac8049c..66f66ee 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -24,6 +24,7 @@
 #include <linux/skbuff.h>
 #include <linux/ethtool.h>
 #include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -879,12 +880,12 @@
 		goto fail;
 	}
 
-	qe->qe_block = sbus_alloc_consistent(&qe->qe_sdev->ofdev.dev,
-					     PAGE_SIZE,
-					     &qe->qblock_dvma);
-	qe->buffers = sbus_alloc_consistent(&qe->qe_sdev->ofdev.dev,
-					    sizeof(struct sunqe_buffers),
-					    &qe->buffers_dvma);
+	qe->qe_block = dma_alloc_coherent(&qe->qe_sdev->ofdev.dev,
+					  PAGE_SIZE,
+					  &qe->qblock_dvma, GFP_ATOMIC);
+	qe->buffers = dma_alloc_coherent(&qe->qe_sdev->ofdev.dev,
+					 sizeof(struct sunqe_buffers),
+					 &qe->buffers_dvma, GFP_ATOMIC);
 	if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
 	    qe->buffers == NULL || qe->buffers_dvma == 0)
 		goto fail;
@@ -926,15 +927,15 @@
 	if (qe->mregs)
 		sbus_iounmap(qe->mregs, MREGS_REG_SIZE);
 	if (qe->qe_block)
-		sbus_free_consistent(&qe->qe_sdev->ofdev.dev,
-				     PAGE_SIZE,
-				     qe->qe_block,
-				     qe->qblock_dvma);
+		dma_free_coherent(&qe->qe_sdev->ofdev.dev,
+				  PAGE_SIZE,
+				  qe->qe_block,
+				  qe->qblock_dvma);
 	if (qe->buffers)
-		sbus_free_consistent(&qe->qe_sdev->ofdev.dev,
-				     sizeof(struct sunqe_buffers),
-				     qe->buffers,
-				     qe->buffers_dvma);
+		dma_free_coherent(&qe->qe_sdev->ofdev.dev,
+				  sizeof(struct sunqe_buffers),
+				  qe->buffers,
+				  qe->buffers_dvma);
 
 	free_netdev(dev);
 
@@ -957,14 +958,14 @@
 
 	sbus_iounmap(qp->qcregs, CREG_REG_SIZE);
 	sbus_iounmap(qp->mregs, MREGS_REG_SIZE);
-	sbus_free_consistent(&qp->qe_sdev->ofdev.dev,
-			     PAGE_SIZE,
-			     qp->qe_block,
-			     qp->qblock_dvma);
-	sbus_free_consistent(&qp->qe_sdev->ofdev.dev,
-			     sizeof(struct sunqe_buffers),
-			     qp->buffers,
-			     qp->buffers_dvma);
+	dma_free_coherent(&qp->qe_sdev->ofdev.dev,
+			  PAGE_SIZE,
+			  qp->qe_block,
+			  qp->qblock_dvma);
+	dma_free_coherent(&qp->qe_sdev->ofdev.dev,
+			  sizeof(struct sunqe_buffers),
+			  qp->buffers,
+			  qp->buffers_dvma);
 
 	free_netdev(net_dev);