net: cosmetic: Fix var naming net <-> eth drivers

Update the naming convention used in the network stack functions and
variables that Ethernet drivers use to interact with it.

This cleans up the temporary hacks that were added to this interface
along with the DM support.

A few checkpatch.pl failures remain after this patch; fixing them is out of
scope here, since the affected drivers are in gross violation of
checkpatch.pl.

Signed-off-by: Joe Hershberger <joe.hershberger@ni.com>
Acked-by: Simon Glass <sjg@chromium.org>
commit 1fd92db83d (parent 1203fcceec, branch master)
Author: Joe Hershberger, committed by Simon Glass
72 files changed:

 arch/mips/mach-au1x00/au1x00_eth.c   | 12
 arch/powerpc/cpu/mpc8260/ether_fcc.c |  4
 arch/powerpc/cpu/mpc8260/ether_scc.c |  4
 arch/powerpc/cpu/mpc85xx/ether_fcc.c |  4
 arch/powerpc/cpu/mpc8xx/fec.c        |  6
 arch/powerpc/cpu/mpc8xx/scc.c        |  5
 doc/README.drivers.eth               | 12
 drivers/net/4xx_enet.c               | 14
 drivers/net/altera_tse.c             | 15
 drivers/net/armada100_fec.c          |  7
 drivers/net/at91_emac.c              |  4
 drivers/net/ax88180.c                |  6
 drivers/net/bcm-sf2-eth.c            |  6
 drivers/net/bfin_mac.c               |  4
 drivers/net/calxedaxgmac.c           |  2
 drivers/net/cpsw.c                   |  4
 drivers/net/cs8900.c                 |  5
 drivers/net/davinci_emac.c           |  5
 drivers/net/dc2114x.c                |  9
 drivers/net/designware.c             |  2
 drivers/net/dm9000x.c                |  5
 drivers/net/dnet.c                   |  5
 drivers/net/e1000.c                  |  2
 drivers/net/eepro100.c               |  3
 drivers/net/enc28j60.c               | 13
 drivers/net/ep93xx_eth.c             | 11
 drivers/net/ethoc.c                  |  4
 drivers/net/fec_mxc.c                |  2
 drivers/net/fm/eth.c                 |  2
 drivers/net/fsl_mcdmafec.c           | 23
 drivers/net/ftgmac100.c              |  4
 drivers/net/ftmac100.c               |  4
 drivers/net/ftmac110.c               |  2
 drivers/net/greth.c                  |  2
 drivers/net/keystone_net.c           |  2
 drivers/net/ks8851_mll.c             |  6
 drivers/net/lan91c96.c               | 19
 drivers/net/lpc32xx_eth.c            | 10
 drivers/net/macb.c                   |  8
 drivers/net/mcffec.c                 |  5
 drivers/net/mpc512x_fec.c            |  3
 drivers/net/mpc5xxx_fec.c            |  2
 drivers/net/mvgbe.c                  | 41
 drivers/net/mvneta.c                 |  2
 drivers/net/natsemi.c                |  3
 drivers/net/ne2000_base.c            |  2
 drivers/net/ns8382x.c                |  6
 drivers/net/pch_gbe.c                |  2
 drivers/net/pcnet.c                  |  2
 drivers/net/rtl8139.c                |  4
 drivers/net/rtl8169.c                |  2
 drivers/net/sh_eth.c                 |  2
 drivers/net/smc91111.c               | 18
 drivers/net/smc911x.c                |  4
 drivers/net/sunxi_emac.c             |  4
 drivers/net/tsec.c                   |  7
 drivers/net/tsi108_eth.c             |  8
 drivers/net/uli526x.c                |  5
 drivers/net/xilinx_axi_emac.c        |  2
 drivers/net/xilinx_emaclite.c        |  2
 drivers/net/xilinx_ll_temac_fifo.c   |  4
 drivers/net/xilinx_ll_temac_sdma.c   |  4
 drivers/net/zynq_gem.c               |  2
 drivers/qe/uec.c                     |  2
 drivers/usb/eth/asix.c               |  3
 drivers/usb/eth/asix88179.c          |  2
 drivers/usb/eth/mcs7830.c            |  2
 drivers/usb/eth/smsc95xx.c           |  3
 drivers/usb/gadget/ether.c           |  5
 include/net.h                        |  7
 net/net.c                            | 11
 post/cpu/mpc8xx/ether.c              |  6
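
To make the new convention concrete, here is a minimal sketch of the two places a legacy (non-DM) driver touches the renamed symbols: programming the stack-provided receive buffers into its RX ring at init time, and handing completed frames back through net_process_received_packet() (formerly NetReceive()). The demo_* names and the descriptor layout are hypothetical; only struct eth_device, net_rx_packets[], net_process_received_packet() and PKTBUFSRX are real U-Boot symbols.

#include <common.h>
#include <net.h>

/* Hypothetical RX buffer descriptor -- real drivers use their own layout. */
struct demo_rxbd {
	u16 status;
	u16 length;
	u32 bufaddr;
};

#define DEMO_RXBD_EMPTY	0x8000
#define DEMO_RXBD_WRAP	0x2000

static struct demo_rxbd demo_rxbd[PKTBUFSRX];

/*
 * Called from the driver's init hook: point every descriptor at one of
 * the buffers the network stack provides in net_rx_packets[].
 */
static void demo_init_rx_ring(void)
{
	int i;

	for (i = 0; i < PKTBUFSRX; i++) {
		demo_rxbd[i].status = DEMO_RXBD_EMPTY;
		demo_rxbd[i].length = 0;
		demo_rxbd[i].bufaddr = (u32)net_rx_packets[i];
	}
	demo_rxbd[PKTBUFSRX - 1].status |= DEMO_RXBD_WRAP;
}

/* Drain completed descriptors and pass each frame up to the stack. */
static int demo_recv(struct eth_device *dev)
{
	static int rx_idx;
	int length;

	while (!(demo_rxbd[rx_idx].status & DEMO_RXBD_EMPTY)) {
		length = demo_rxbd[rx_idx].length - 4;	/* strip CRC */

		if (length > 0)
			net_process_received_packet(net_rx_packets[rx_idx],
						    length);

		/* Give the buffer back to the hardware. */
		demo_rxbd[rx_idx].length = 0;
		demo_rxbd[rx_idx].status |= DEMO_RXBD_EMPTY;

		if (++rx_idx >= PKTBUFSRX)
			rx_idx = 0;
	}

	return 0;
}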

@ -187,13 +187,14 @@ static int au1x00_recv(struct eth_device* dev){
if(status&RX_ERROR){
printf("Rx error 0x%x\n", status);
}
else{
} else {
/* Pass the packet up to the protocol layers. */
NetReceive(NetRxPackets[next_rx], length - 4);
net_process_received_packet(net_rx_packets[next_rx],
length - 4);
}
fifo_rx[next_rx].addr = (virt_to_phys(NetRxPackets[next_rx]))|RX_DMA_ENABLE;
fifo_rx[next_rx].addr =
(virt_to_phys(net_rx_packets[next_rx])) | RX_DMA_ENABLE;
next_rx++;
if(next_rx>=NO_OF_FIFOS){
@ -234,7 +235,8 @@ static int au1x00_init(struct eth_device* dev, bd_t * bd){
for(i=0;i<NO_OF_FIFOS;i++){
fifo_tx[i].len = 0;
fifo_tx[i].addr = virt_to_phys(&txbuf[0]);
fifo_rx[i].addr = (virt_to_phys(NetRxPackets[i]))|RX_DMA_ENABLE;
fifo_rx[i].addr = (virt_to_phys(net_rx_packets[i])) |
RX_DMA_ENABLE;
}
/* Put mac addr in little endian */

@ -183,7 +183,7 @@ static int fec_recv(struct eth_device* dev)
}
else {
/* Pass the packet up to the protocol layers. */
NetReceive(NetRxPackets[rxIdx], length - 4);
net_process_received_packet(net_rx_packets[rxIdx], length - 4);
}
@ -243,7 +243,7 @@ static int fec_init(struct eth_device* dev, bd_t *bis)
{
rtx.rxbd[i].cbd_sc = BD_ENET_RX_EMPTY;
rtx.rxbd[i].cbd_datlen = 0;
rtx.rxbd[i].cbd_bufaddr = (uint)NetRxPackets[i];
rtx.rxbd[i].cbd_bufaddr = (uint)net_rx_packets[i];
}
rtx.rxbd[PKTBUFSRX - 1].cbd_sc |= BD_ENET_RX_WRAP;

@ -146,7 +146,7 @@ static int sec_rx(struct eth_device *dev)
else
{
/* Pass the packet up to the protocol layers. */
NetReceive(NetRxPackets[rxIdx], length - 4);
net_process_received_packet(net_rx_packets[rxIdx], length - 4);
}
@ -263,7 +263,7 @@ static int sec_init(struct eth_device *dev, bd_t *bis)
{
rtx->rxbd[i].cbd_sc = BD_ENET_RX_EMPTY;
rtx->rxbd[i].cbd_datlen = 0; /* Reset */
rtx->rxbd[i].cbd_bufaddr = (uint)NetRxPackets[i];
rtx->rxbd[i].cbd_bufaddr = (uint)net_rx_packets[i];
}
rtx->rxbd[PKTBUFSRX - 1].cbd_sc |= BD_ENET_RX_WRAP;

@ -186,7 +186,7 @@ static int fec_recv(struct eth_device* dev)
}
else {
/* Pass the packet up to the protocol layers. */
NetReceive(NetRxPackets[rxIdx], length - 4);
net_process_received_packet(net_rx_packets[rxIdx], length - 4);
}
@ -263,7 +263,7 @@ static int fec_init(struct eth_device* dev, bd_t *bis)
{
rtx.rxbd[i].cbd_sc = BD_ENET_RX_EMPTY;
rtx.rxbd[i].cbd_datlen = 0;
rtx.rxbd[i].cbd_bufaddr = (uint)NetRxPackets[i];
rtx.rxbd[i].cbd_bufaddr = (uint)net_rx_packets[i];
}
rtx.rxbd[PKTBUFSRX - 1].cbd_sc |= BD_ENET_RX_WRAP;

@ -247,7 +247,7 @@ static int fec_recv (struct eth_device *dev)
rtx->rxbd[rxIdx].cbd_sc);
#endif
} else {
uchar *rx = NetRxPackets[rxIdx];
uchar *rx = net_rx_packets[rxIdx];
length -= 4;
@ -261,7 +261,7 @@ static int fec_recv (struct eth_device *dev)
* Pass the packet up to the protocol layers.
*/
if (rx != NULL)
NetReceive (rx, length);
net_process_received_packet(rx, length);
}
/* Give the buffer back to the FEC. */
@ -576,7 +576,7 @@ static int fec_init (struct eth_device *dev, bd_t * bd)
for (i = 0; i < PKTBUFSRX; i++) {
rtx->rxbd[i].cbd_sc = BD_ENET_RX_EMPTY;
rtx->rxbd[i].cbd_datlen = 0; /* Reset */
rtx->rxbd[i].cbd_bufaddr = (uint) NetRxPackets[i];
rtx->rxbd[i].cbd_bufaddr = (uint) net_rx_packets[i];
}
rtx->rxbd[PKTBUFSRX - 1].cbd_sc |= BD_ENET_RX_WRAP;

@ -159,7 +159,8 @@ static int scc_recv (struct eth_device *dev)
#endif
} else {
/* Pass the packet up to the protocol layers. */
NetReceive (NetRxPackets[rxIdx], length - 4);
net_process_received_packet(net_rx_packets[rxIdx],
length - 4);
}
@ -280,7 +281,7 @@ static int scc_init (struct eth_device *dev, bd_t * bis)
for (i = 0; i < PKTBUFSRX; i++) {
rtx->rxbd[i].cbd_sc = BD_ENET_RX_EMPTY;
rtx->rxbd[i].cbd_datlen = 0; /* Reset */
rtx->rxbd[i].cbd_bufaddr = (uint) NetRxPackets[i];
rtx->rxbd[i].cbd_bufaddr = (uint) net_rx_packets[i];
}
rtx->rxbd[PKTBUFSRX - 1].cbd_sc |= BD_ENET_RX_WRAP;

@ -141,11 +141,11 @@ function can be called multiple times in a row.
The recv function should process packets as long as the hardware has them
readily available before returning. i.e. you should drain the hardware fifo.
For each packet you receive, you should call the NetReceive() function on it
For each packet you receive, you should call the net_process_received_packet() function on it
along with the packet length. The common code sets up packet buffers for you
already in the .bss (NetRxPackets), so there should be no need to allocate your
own. This doesn't mean you must use the NetRxPackets array however; you're
free to call the NetReceive() function with any buffer you wish. So the pseudo
already in the .bss (net_rx_packets), so there should be no need to allocate your
own. This doesn't mean you must use the net_rx_packets array however; you're
free to call the net_process_received_packet() function with any buffer you wish. So the pseudo
code here would look something like:
int ape_recv(struct eth_device *dev)
{
@ -153,9 +153,9 @@ int ape_recv(struct eth_device *dev)
...
while (packets_are_available()) {
...
length = ape_get_packet(&NetRxPackets[i]);
length = ape_get_packet(&net_rx_packets[i]);
...
NetReceive(&NetRxPackets[i], length);
net_process_received_packet(&net_rx_packets[i], length);
...
if (++i >= PKTBUFSRX)
i = 0;
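
The README's point that net_rx_packets[] is a convenience rather than a requirement can be illustrated with a sketch of a recv routine that hands the stack a driver-owned DMA buffer instead. The ape_dma_* names below are hypothetical, in the spirit of the README's "ape" example; only net_process_received_packet(), PKTSIZE_ALIGN, ARCH_DMA_MINALIGN and the __aligned() attribute come from U-Boot headers.

#include <common.h>
#include <net.h>

/* Driver-owned receive buffer; the hardware DMAs frames straight here. */
static uchar ape_dma_buf[PKTSIZE_ALIGN] __aligned(ARCH_DMA_MINALIGN);

/* Hypothetical helper: returns the frame length, or 0 if nothing pending. */
static int ape_dma_get_frame(uchar *buf, int maxlen)
{
	return 0;	/* stub for illustration only */
}

static int ape_recv(struct eth_device *dev)
{
	int length;

	/* Drain everything the hardware has before returning. */
	while ((length = ape_dma_get_frame(ape_dma_buf, PKTSIZE_ALIGN)) > 0) {
		/*
		 * Any buffer is acceptable here; net_rx_packets[] is just
		 * the default set of buffers the stack sets up for you.
		 */
		net_process_received_packet(ape_dma_buf, length);
	}

	return 0;
}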

@ -1350,7 +1350,7 @@ get_speed:
for (i = 0; i < NUM_RX_BUFF; i++) {
hw_p->rx[i].ctrl = 0;
hw_p->rx[i].data_len = 0;
hw_p->rx[i].data_ptr = (char *)NetRxPackets[i];
hw_p->rx[i].data_ptr = (char *)net_rx_packets[i];
if ((NUM_RX_BUFF - 1) == i)
hw_p->rx[i].ctrl |= MAL_RX_CTRL_WRAP;
hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR;
@ -1858,13 +1858,17 @@ static int ppc_4xx_eth_rx (struct eth_device *dev)
length = hw_p->rx[user_index].data_len & 0x0fff;
/* Pass the packet up to the protocol layers. */
/* NetReceive(NetRxPackets[rxIdx], length - 4); */
/* NetReceive(NetRxPackets[i], length); */
/*
* Pass the packet up to the protocol layers.
* net_process_received_packet(net_rx_packets[rxIdx],
* length - 4);
* net_process_received_packet(net_rx_packets[i], length);
*/
invalidate_dcache_range((u32)hw_p->rx[user_index].data_ptr,
(u32)hw_p->rx[user_index].data_ptr +
length - 4);
NetReceive (NetRxPackets[user_index], length - 4);
net_process_received_packet(net_rx_packets[user_index],
length - 4);
/* Free Recv Buffer */
hw_p->rx[user_index].ctrl |= MAL_RX_CTRL_EMPTY;
/* Free rx buffer descriptor queue */

@ -303,16 +303,17 @@ static int tse_eth_rx(struct eth_device *dev)
ALT_SGDMA_DESCRIPTOR_STATUS_TERMINATED_BY_EOP_MSK) {
debug("got packet\n");
packet_length = rx_desc->actual_bytes_transferred;
NetReceive(NetRxPackets[0], packet_length);
net_process_received_packet(net_rx_packets[0], packet_length);
/* start descriptor again */
flush_dcache_range((unsigned long)(NetRxPackets[0]),
(unsigned long)(NetRxPackets[0]) + PKTSIZE_ALIGN);
flush_dcache_range((unsigned long)(net_rx_packets[0]),
(unsigned long)(net_rx_packets[0] +
PKTSIZE_ALIGN));
alt_sgdma_construct_descriptor_burst(
(volatile struct alt_sgdma_descriptor *)&rx_desc[0],
(volatile struct alt_sgdma_descriptor *)&rx_desc[1],
(unsigned int)0x0, /* read addr */
(unsigned int *)NetRxPackets[0],
(unsigned int *)net_rx_packets[0],
0x0, /* length or EOP */
0x0, /* gen eop */
0x0, /* read fixed */
@ -835,13 +836,13 @@ static int tse_eth_init(struct eth_device *dev, bd_t * bd)
0x0 /* channel */
);
debug("Configuring rx desc\n");
flush_dcache_range((unsigned long)(NetRxPackets[0]),
(unsigned long)(NetRxPackets[0]) + PKTSIZE_ALIGN);
flush_dcache_range((unsigned long)(net_rx_packets[0]),
(unsigned long)(net_rx_packets[0]) + PKTSIZE_ALIGN);
alt_sgdma_construct_descriptor_burst(
(volatile struct alt_sgdma_descriptor *)&rx_desc[0],
(volatile struct alt_sgdma_descriptor *)&rx_desc[1],
(unsigned int)0x0, /* read addr */
(unsigned int *)NetRxPackets[0],
(unsigned int *)net_rx_packets[0],
0x0, /* length or EOP */
0x0, /* gen eop */
0x0, /* read fixed */

@ -639,15 +639,16 @@ static int armdfec_recv(struct eth_device *dev)
} else {
/* !!! call higher layer processing */
debug("ARMD100 FEC: (%s) Sending Received packet to"
" upper layer (NetReceive)\n", __func__);
" upper layer (net_process_received_packet)\n", __func__);
/*
* let the upper layer handle the packet, subtract offset
* as two dummy bytes are added in received buffer see
* PORT_CONFIG_EXT register bit TWO_Byte_Stuff_Mode bit.
*/
NetReceive((p_rxdesc_curr->buf_ptr + RX_BUF_OFFSET),
(int)(p_rxdesc_curr->byte_cnt - RX_BUF_OFFSET));
net_process_received_packet(
p_rxdesc_curr->buf_ptr + RX_BUF_OFFSET,
(int)(p_rxdesc_curr->byte_cnt - RX_BUF_OFFSET));
}
/*
* free these descriptors and point next in the ring

@ -352,7 +352,7 @@ static int at91emac_init(struct eth_device *netdev, bd_t *bd)
/* Init Ethernet buffers */
for (i = 0; i < RBF_FRAMEMAX; i++) {
dev->rbfdt[i].addr = (unsigned long) NetRxPackets[i];
dev->rbfdt[i].addr = (unsigned long) net_rx_packets[i];
dev->rbfdt[i].size = 0;
}
dev->rbfdt[RBF_FRAMEMAX - 1].addr |= RBF_WRAP;
@ -420,7 +420,7 @@ static int at91emac_recv(struct eth_device *netdev)
rbfp = &dev->rbfdt[dev->rbindex];
while (rbfp->addr & RBF_OWNER) {
size = rbfp->size & RBF_SIZE;
NetReceive(NetRxPackets[dev->rbindex], size);
net_process_received_packet(net_rx_packets[dev->rbindex], size);
debug_cond(DEBUG_AT91EMAC, "Recv[%ld]: %d bytes @ %lx\n",
dev->rbindex, size, rbfp->addr);

@ -192,9 +192,9 @@ static void ax88180_rx_handler (struct eth_device *dev)
unsigned short rxcurt_ptr, rxbound_ptr, next_ptr;
int i;
#if defined (CONFIG_DRIVER_AX88180_16BIT)
unsigned short *rxdata = (unsigned short *)NetRxPackets[0];
unsigned short *rxdata = (unsigned short *)net_rx_packets[0];
#else
unsigned long *rxdata = (unsigned long *)NetRxPackets[0];
unsigned long *rxdata = (unsigned long *)net_rx_packets[0];
#endif
unsigned short count;
@ -237,7 +237,7 @@ static void ax88180_rx_handler (struct eth_device *dev)
OUTW (dev, RX_STOP_READ, RXINDICATOR);
/* Pass the packet up to the protocol layers. */
NetReceive (NetRxPackets[0], data_size);
net_process_received_packet(net_rx_packets[0], data_size);
OUTW (dev, rxbound_ptr, RXBOUND);

@ -103,7 +103,7 @@ static int bcm_sf2_eth_send(struct eth_device *dev, void *packet, int length)
static int bcm_sf2_eth_receive(struct eth_device *dev)
{
struct eth_dma *dma = &(((struct eth_info *)(dev->priv))->dma);
uint8_t *buf = (uint8_t *)NetRxPackets[0];
uint8_t *buf = (uint8_t *)net_rx_packets[0];
int rcvlen;
int rc = 0;
int i = 0;
@ -124,11 +124,11 @@ static int bcm_sf2_eth_receive(struct eth_device *dev)
debug("recieved\n");
/* Forward received packet to uboot network handler */
NetReceive(buf, rcvlen);
net_process_received_packet(buf, rcvlen);
if (++i >= PKTBUFSRX)
i = 0;
buf = NetRxPackets[i];
buf = net_rx_packets[i];
}
}

@ -189,8 +189,8 @@ static int bfin_EMAC_recv(struct eth_device *dev)
debug("%s: len = %d\n", __func__, length - 4);
NetRxPackets[rxIdx] = rxbuf[rxIdx]->FrmData->Dest;
NetReceive(NetRxPackets[rxIdx], length - 4);
net_rx_packets[rxIdx] = rxbuf[rxIdx]->FrmData->Dest;
net_process_received_packet(net_rx_packets[rxIdx], length - 4);
bfin_write_DMA1_IRQ_STATUS(DMA_DONE | DMA_ERR);
rxbuf[rxIdx]->StatusWord = 0x00000000;
if ((rxIdx + 1) >= PKTBUFSRX)

@ -466,7 +466,7 @@ static int xgmac_rx(struct eth_device *dev)
length = desc_get_rx_frame_len(rxdesc);
NetReceive(desc_get_buf_addr(rxdesc), length);
net_process_received_packet(desc_get_buf_addr(rxdesc), length);
/* set descriptor back to owned by XGMAC */
desc_set_rx_owner(rxdesc);

@ -846,7 +846,7 @@ static int cpsw_init(struct eth_device *dev, bd_t *bis)
/* submit rx descs */
for (i = 0; i < PKTBUFSRX; i++) {
ret = cpdma_submit(priv, &priv->rx_chan, NetRxPackets[i],
ret = cpdma_submit(priv, &priv->rx_chan, net_rx_packets[i],
PKTSIZE);
if (ret < 0) {
printf("error %d submitting rx desc\n", ret);
@ -905,7 +905,7 @@ static int cpsw_recv(struct eth_device *dev)
while (cpdma_process(priv, &priv->rx_chan, &buffer, &len) >= 0) {
invalidate_dcache_range((unsigned long)buffer,
(unsigned long)buffer + PKTSIZE_ALIGN);
NetReceive(buffer, len);
net_process_received_packet(buffer, len);
cpdma_submit(priv, &priv->rx_chan, buffer, PKTSIZE);
}

@ -188,14 +188,13 @@ static int cs8900_recv(struct eth_device *dev)
if (rxlen > PKTSIZE_ALIGN + PKTALIGN)
debug("packet too big!\n");
for (addr = (u16 *) NetRxPackets[0], i = rxlen >> 1; i > 0;
i--)
for (addr = (u16 *)net_rx_packets[0], i = rxlen >> 1; i > 0; i--)
*addr++ = REG_READ(&priv->regs->rtdata);
if (rxlen & 1)
*addr++ = REG_READ(&priv->regs->rtdata);
/* Pass the packet up to the protocol layers. */
NetReceive (NetRxPackets[0], rxlen);
net_process_received_packet(net_rx_packets[0], rxlen);
return rxlen;
}

@ -700,8 +700,9 @@ static int davinci_eth_rcv_packet (struct eth_device *dev)
unsigned long tmp = (unsigned long)rx_curr_desc->buffer;
invalidate_dcache_range(tmp, tmp + EMAC_RXBUF_SIZE);
NetReceive (rx_curr_desc->buffer,
(rx_curr_desc->buff_off_len & 0xffff));
net_process_received_packet(
rx_curr_desc->buffer,
rx_curr_desc->buff_off_len & 0xffff);
ret = rx_curr_desc->buff_off_len & 0xffff;
}

@ -333,9 +333,11 @@ static int dc21x4x_init(struct eth_device* dev, bd_t* bis)
for (i = 0; i < NUM_RX_DESC; i++) {
rx_ring[i].status = cpu_to_le32(R_OWN);
rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
rx_ring[i].buf = cpu_to_le32(phys_to_bus((u32) NetRxPackets[i]));
rx_ring[i].buf = cpu_to_le32(
phys_to_bus((u32)net_rx_packets[i]));
#ifdef CONFIG_TULIP_FIX_DAVICOM
rx_ring[i].next = cpu_to_le32(phys_to_bus((u32) &rx_ring[(i+1) % NUM_RX_DESC]));
rx_ring[i].next = cpu_to_le32(
phys_to_bus((u32)&rx_ring[(i + 1) % NUM_RX_DESC]));
#else
rx_ring[i].next = 0;
#endif
@ -448,7 +450,8 @@ static int dc21x4x_recv(struct eth_device* dev)
/* Pass the packet up to the protocol
* layers.
*/
NetReceive(NetRxPackets[rx_new], length - 4);
net_process_received_packet(
net_rx_packets[rx_new], length - 4);
}
/* Change buffer ownership for this frame, back

@ -374,7 +374,7 @@ static int dw_eth_recv(struct eth_device *dev)
data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
invalidate_dcache_range(data_start, data_end);
NetReceive(desc_p->dmamac_addr, length);
net_process_received_packet(desc_p->dmamac_addr, length);
/*
* Make the current descriptor valid again and go to

@ -464,7 +464,8 @@ static void dm9000_halt(struct eth_device *netdev)
*/
static int dm9000_rx(struct eth_device *netdev)
{
u8 rxbyte, *rdptr = (u8 *) NetRxPackets[0];
u8 rxbyte;
u8 *rdptr = (u8 *)net_rx_packets[0];
u16 RxStatus, RxLen = 0;
struct board_info *db = &dm9000_info;
@ -525,7 +526,7 @@ static int dm9000_rx(struct eth_device *netdev)
DM9000_DMP_PACKET(__func__ , rdptr, RxLen);
DM9000_DBG("passing packet to upper layer\n");
NetReceive(NetRxPackets[0], RxLen);
net_process_received_packet(net_rx_packets[0], RxLen);
}
}
return 0;

@ -188,12 +188,13 @@ static int dnet_recv(struct eth_device *netdev)
if (cmd_word & 0xDF180000)
printf("%s packet receive error %x\n", __func__, cmd_word);
data_ptr = (unsigned int *) NetRxPackets[0];
data_ptr = (unsigned int *)net_rx_packets[0];
for (i = 0; i < (pkt_len + 3) >> 2; i++)
*data_ptr++ = readl(&dnet->regs->RX_DATA_FIFO);
NetReceive(NetRxPackets[0], pkt_len + 5); /* ok + 5 ?? */
/* ok + 5 ?? */
net_process_received_packet(net_rx_packets[0], pkt_len + 5);
return 0;
}

@ -5158,7 +5158,7 @@ e1000_poll(struct eth_device *nic)
invalidate_dcache_range((unsigned long)packet,
(unsigned long)packet +
roundup(len, ARCH_DMA_MINALIGN));
NetReceive((uchar *)packet, len);
net_process_received_packet((uchar *)packet, len);
fill_rx(hw);
return 1;
}

@ -674,7 +674,8 @@ static int eepro100_recv (struct eth_device *dev)
/* Pass the packet up to the protocol
* layers.
*/
NetReceive((u8 *)rx_ring[rx_next].data, length);
net_process_received_packet((u8 *)rx_ring[rx_next].data,
length);
} else {
/* There was an error.
*/

@ -21,8 +21,8 @@
* enc_miiphy_read(), enc_miiphy_write(), enc_write_hwaddr(),
* enc_init(), enc_recv(), enc_send(), enc_halt()
* ALL other functions assume that the bus has already been claimed!
* Since NetReceive() might call enc_send() in return, the bus must be
* released, NetReceive() called and claimed again.
* Since net_process_received_packet() might call enc_send() in return, the bus
* must be released, net_process_received_packet() called and claimed again.
*/
/*
@ -415,7 +415,7 @@ static void enc_reset_rx_call(enc_dev_t *enc)
*/
static void enc_receive(enc_dev_t *enc)
{
u8 *packet = (u8 *)NetRxPackets[0];
u8 *packet = (u8 *)net_rx_packets[0];
u16 pkt_len;
u16 copy_len;
u16 status;
@ -468,11 +468,12 @@ static void enc_receive(enc_dev_t *enc)
continue;
}
/*
* Because NetReceive() might call enc_send(), we need to
* release the SPI bus, call NetReceive(), reclaim the bus
* Because net_process_received_packet() might call enc_send(),
* we need to release the SPI bus, call
* net_process_received_packet(), reclaim the bus.
*/
enc_release_bus(enc);
NetReceive(packet, pkt_len);
net_process_received_packet(packet, pkt_len);
if (enc_claim_bus(enc))
return;
(void)enc_r8(enc, CTL_REG_EIR);

@ -53,7 +53,7 @@ static void dump_dev(struct eth_device *dev)
printf(" rx_sq.end %p\n", priv->rx_sq.end);
for (i = 0; i < NUMRXDESC; i++)
printf(" rx_buffer[%2.d] %p\n", i, NetRxPackets[i]);
printf(" rx_buffer[%2.d] %p\n", i, net_rx_packets[i]);
printf(" tx_dq.base %p\n", priv->tx_dq.base);
printf(" tx_dq.current %p\n", priv->tx_dq.current);
@ -237,7 +237,7 @@ static int ep93xx_eth_open(struct eth_device *dev, bd_t *bd)
*/
for (i = 0; i < NUMRXDESC; i++) {
/* set buffer address */
(priv->rx_dq.base + i)->word1 = (uint32_t)NetRxPackets[i];
(priv->rx_dq.base + i)->word1 = (uint32_t)net_rx_packets[i];
/* set buffer length, clear buffer index and NSOF */
(priv->rx_dq.base + i)->word2 = PKTSIZE_ALIGN;
@ -310,15 +310,16 @@ static int ep93xx_eth_rcv_packet(struct eth_device *dev)
/*
* We have a good frame. Extract the frame's length
* from the current rx_status_queue entry, and copy
* the frame's data into NetRxPackets[] of the
* the frame's data into net_rx_packets[] of the
* protocol stack. We track the total number of
* bytes in the frame (nbytes_frame) which will be
* used when we pass the data off to the protocol
* layer via NetReceive().
* layer via net_process_received_packet().
*/
len = RX_STATUS_FRAME_LEN(priv->rx_sq.current);
NetReceive((uchar *)priv->rx_dq.current->word1, len);
net_process_received_packet(
(uchar *)priv->rx_dq.current->word1, len);
debug("reporting %d bytes...\n", len);
} else {

@ -267,7 +267,7 @@ static int ethoc_init_ring(struct eth_device *dev)
bd.stat = RX_BD_EMPTY | RX_BD_IRQ;
for (i = 0; i < priv->num_rx; i++) {
bd.addr = (u32)NetRxPackets[i];
bd.addr = (u32)net_rx_packets[i];
if (i == priv->num_rx - 1)
bd.stat |= RX_BD_WRAP;
@ -372,7 +372,7 @@ static int ethoc_rx(struct eth_device *dev, int limit)
if (ethoc_update_rx_stats(&bd) == 0) {
int size = bd.stat >> 16;
size -= 4; /* strip the CRC */
NetReceive((void *)bd.addr, size);
net_process_received_packet((void *)bd.addr, size);
}
/* clear the buffer descriptor so it can be reused */

@ -852,7 +852,7 @@ static int fec_recv(struct eth_device *dev)
swap_packet((uint32_t *)frame->data, frame_length);
#endif
memcpy(buff, frame->data, frame_length);
NetReceive(buff, frame_length);
net_process_received_packet(buff, frame_length);
len = frame_length;
} else {
if (bd_status & FEC_RBD_ERR)

@ -530,7 +530,7 @@ static int fm_eth_recv(struct eth_device *dev)
if (!(status & RxBD_ERROR)) {
data = (u8 *)rxbd->buf_ptr_lo;
len = rxbd->len;
NetReceive(data, len);
net_process_received_packet(data, len);
} else {
printf("%s: Rx error\n", dev->name);
return 0;

@ -244,7 +244,7 @@ static int fec_recv(struct eth_device *dev)
struct fec_info_dma *info = dev->priv;
volatile fecdma_t *fecp = (fecdma_t *) (info->iobase);
cbd_t *pRbd = &info->rxbd[info->rxIdx];
cbd_t *prbd = &info->rxbd[info->rxIdx];
u32 ievent;
int frame_length, len = 0;
@ -276,26 +276,27 @@ static int fec_recv(struct eth_device *dev)
}
}
if (!(pRbd->cbd_sc & BD_ENET_RX_EMPTY)) {
if ((pRbd->cbd_sc & BD_ENET_RX_LAST)
&& !(pRbd->cbd_sc & BD_ENET_RX_ERR)
&& ((pRbd->cbd_datlen - 4) > 14)) {
if (!(prbd->cbd_sc & BD_ENET_RX_EMPTY)) {
if ((prbd->cbd_sc & BD_ENET_RX_LAST) &&
!(prbd->cbd_sc & BD_ENET_RX_ERR) &&
((prbd->cbd_datlen - 4) > 14)) {
/* Get buffer address and size */
frame_length = pRbd->cbd_datlen - 4;
frame_length = prbd->cbd_datlen - 4;
/* Fill the buffer and pass it to upper layers */
NetReceive((uchar *)pRbd->cbd_bufaddr, frame_length);
net_process_received_packet((uchar *)prbd->cbd_bufaddr,
frame_length);
len = frame_length;
}
/* Reset buffer descriptor as empty */
if ((info->rxIdx) == (PKTBUFSRX - 1))
pRbd->cbd_sc = (BD_ENET_RX_WRAP | BD_ENET_RX_EMPTY);
prbd->cbd_sc = (BD_ENET_RX_WRAP | BD_ENET_RX_EMPTY);
else
pRbd->cbd_sc = BD_ENET_RX_EMPTY;
prbd->cbd_sc = BD_ENET_RX_EMPTY;
pRbd->cbd_datlen = PKTSIZE_ALIGN;
prbd->cbd_datlen = PKTSIZE_ALIGN;
/* Now, we have an empty RxBD, restart the DMA receive task */
MCD_continDma(info->rxTask);
@ -399,7 +400,7 @@ static int fec_init(struct eth_device *dev, bd_t * bd)
for (i = 0; i < PKTBUFSRX; i++) {
info->rxbd[i].cbd_sc = BD_ENET_RX_EMPTY;
info->rxbd[i].cbd_datlen = PKTSIZE_ALIGN;
info->rxbd[i].cbd_bufaddr = (uint) NetRxPackets[i];
info->rxbd[i].cbd_bufaddr = (uint) net_rx_packets[i];
}
info->rxbd[PKTBUFSRX - 1].cbd_sc |= BD_ENET_RX_WRAP;

@ -423,7 +423,7 @@ static int ftgmac100_init(struct eth_device *dev, bd_t *bd)
for (i = 0; i < PKTBUFSRX; i++) {
/* RXBUF_BADR */
if (!rxdes[i].rxdes2) {
buf = NetRxPackets[i];
buf = net_rx_packets[i];
rxdes[i].rxdes3 = virt_to_phys(buf);
rxdes[i].rxdes2 = (uint)buf;
}
@ -493,7 +493,7 @@ static int ftgmac100_recv(struct eth_device *dev)
dma_map_single((void *)curr_des->rxdes2, rxlen, DMA_FROM_DEVICE);
/* pass the packet up to the protocol layers. */
NetReceive((void *)curr_des->rxdes2, rxlen);
net_process_received_packet((void *)curr_des->rxdes2, rxlen);
/* release buffer to DMA */
curr_des->rxdes0 &= ~FTGMAC100_RXDES0_RXPKT_RDY;

@ -102,7 +102,7 @@ static int ftmac100_init (struct eth_device *dev, bd_t *bd)
for (i = 0; i < PKTBUFSRX; i++) {
/* RXBUF_BADR */
rxdes[i].rxdes2 = (unsigned int)NetRxPackets[i];
rxdes[i].rxdes2 = (unsigned int)net_rx_packets[i];
rxdes[i].rxdes1 |= FTMAC100_RXDES1_RXBUF_SIZE (PKTSIZE_ALIGN);
rxdes[i].rxdes0 = FTMAC100_RXDES0_RXDMA_OWN;
}
@ -164,7 +164,7 @@ static int ftmac100_recv (struct eth_device *dev)
/* pass the packet up to the protocol layers. */
NetReceive ((void *)curr_des->rxdes2, rxlen);
net_process_received_packet((void *)curr_des->rxdes2, rxlen);
/* release buffer to DMA */

@ -347,7 +347,7 @@ static int ftmac110_recv(struct eth_device *dev)
printf("ftmac110: rx error\n");
} else {
dma_map_single(buf, len, DMA_FROM_DEVICE);
NetReceive(buf, len);
net_process_received_packet(buf, len);
rlen += len;
}

@ -533,7 +533,7 @@ int greth_recv(struct eth_device *dev)
sparc_dcache_flush_all();
/* pass packet on to network subsystem */
NetReceive((void *)d, len);
net_process_received_packet((void *)d, len);
/* bump stats counters */
greth->stats.rx_packets++;

@ -505,7 +505,7 @@ static int keystone2_eth_rcv_packet(struct eth_device *dev)
if (hd == NULL)
return 0;
NetReceive((uchar *)pkt, pkt_size);
net_process_received_packet((uchar *)pkt, pkt_size);
ksnav_release_rxhd(&netcp_pktdma, hd);

@ -321,8 +321,8 @@ static void ks_rcv(struct eth_device *dev, uchar **pv_data)
/* read data block including CRC 4 bytes */
ks_read_qmu(dev, (u16 *)(*pv_data), frame_hdr->len);
/* NetRxPackets buffer size is ok (*pv_data pointer) */
NetReceive(*pv_data, frame_hdr->len);
/* net_rx_packets buffer size is ok (*pv_data) */
net_process_received_packet(*pv_data, frame_hdr->len);
pv_data++;
} else {
ks_wrreg16(dev, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
@ -573,7 +573,7 @@ static int ks8851_mll_recv(struct eth_device *dev)
ks_wrreg16(dev, KS_ISR, status);
if ((status & IRQ_RXI))
ks_rcv(dev, (uchar **)NetRxPackets);
ks_rcv(dev, (uchar **)net_rx_packets);
if ((status & IRQ_LDI)) {
u16 pmecr = ks_rdreg16(dev, KS_PMECR);

@ -568,29 +568,30 @@ static int smc_rcv(struct eth_device *dev)
to send the DWORDs or the bytes first, or some
mixture. A mixture might improve already slow PIO
performance */
SMC_insl(dev, LAN91C96_DATA_HIGH, NetRxPackets[0],
packet_length >> 2);
SMC_insl(dev, LAN91C96_DATA_HIGH, net_rx_packets[0],
packet_length >> 2);
/* read the left over bytes */
if (packet_length & 3) {
int i;
byte *tail = (byte *) (NetRxPackets[0] + (packet_length & ~3));
byte *tail = (byte *)(net_rx_packets[0] +
(packet_length & ~3));
dword leftover = SMC_inl(dev, LAN91C96_DATA_HIGH);
for (i = 0; i < (packet_length & 3); i++)
*tail++ = (byte) (leftover >> (8 * i)) & 0xff;
}
#else
PRINTK3 (" Reading %d words and %d byte(s) \n",
(packet_length >> 1), packet_length & 1);
SMC_insw(dev, LAN91C96_DATA_HIGH, NetRxPackets[0],
packet_length >> 1);
PRINTK3(" Reading %d words and %d byte(s)\n",
(packet_length >> 1), packet_length & 1);
SMC_insw(dev, LAN91C96_DATA_HIGH, net_rx_packets[0],
packet_length >> 1);
#endif /* USE_32_BIT */
#if SMC_DEBUG > 2
printf ("Receiving Packet\n");
print_packet((byte *)NetRxPackets[0], packet_length);
print_packet((byte *)net_rx_packets[0], packet_length);
#endif
} else {
/* error ... */
@ -609,7 +610,7 @@ static int smc_rcv(struct eth_device *dev)
if (!is_error) {
/* Pass the packet up to the protocol layers. */
NetReceive (NetRxPackets[0], packet_length);
net_process_received_packet(net_rx_packets[0], packet_length);
return packet_length;
} else {
return 0;

@ -419,10 +419,12 @@ static int lpc32xx_eth_recv(struct eth_device *dev)
rx_index = readl(&regs->rxconsumeindex);
/* if data was valid, pass it on */
if (!(bufs->rx_stat[rx_index].statusinfo & RX_STAT_ERRORS))
NetReceive(&(bufs->rx_buf[rx_index*PKTSIZE_ALIGN]),
(bufs->rx_stat[rx_index].statusinfo
& RX_STAT_RXSIZE) + 1);
if (!(bufs->rx_stat[rx_index].statusinfo & RX_STAT_ERRORS)) {
net_process_received_packet(
&(bufs->rx_buf[rx_index * PKTSIZE_ALIGN]),
(bufs->rx_stat[rx_index].statusinfo
& RX_STAT_RXSIZE) + 1);
}
/* pass receive slot back to DMA engine */
rx_index = (rx_index + 1) % RX_BUF_COUNT;

@ -347,14 +347,14 @@ static int macb_recv(struct eth_device *netdev)
headlen = 128 * (MACB_RX_RING_SIZE
- macb->rx_tail);
taillen = length - headlen;
memcpy((void *)NetRxPackets[0],
memcpy((void *)net_rx_packets[0],
buffer, headlen);
memcpy((void *)NetRxPackets[0] + headlen,
memcpy((void *)net_rx_packets[0] + headlen,
macb->rx_buffer, taillen);
buffer = (void *)NetRxPackets[0];
buffer = (void *)net_rx_packets[0];
}
NetReceive(buffer, length);
net_process_received_packet(buffer, length);
if (++rx_tail >= MACB_RX_RING_SIZE)
rx_tail = 0;
reclaim_rx_buffers(macb, rx_tail);

@ -219,7 +219,8 @@ int fec_recv(struct eth_device *dev)
length -= 4;
/* Pass the packet up to the protocol layers. */
NetReceive(NetRxPackets[info->rxIdx], length);
net_process_received_packet(net_rx_packets[info->rxIdx],
length);
fecp->eir |= FEC_EIR_RXF;
}
@ -477,7 +478,7 @@ int fec_init(struct eth_device *dev, bd_t * bd)
for (i = 0; i < PKTBUFSRX; i++) {
info->rxbd[i].cbd_sc = BD_ENET_RX_EMPTY;
info->rxbd[i].cbd_datlen = 0; /* Reset */
info->rxbd[i].cbd_bufaddr = (uint) NetRxPackets[i];
info->rxbd[i].cbd_bufaddr = (uint) net_rx_packets[i];
}
info->rxbd[PKTBUFSRX - 1].cbd_sc |= BD_ENET_RX_WRAP;

@ -591,7 +591,8 @@ static int mpc512x_fec_recv (struct eth_device *dev)
rx_buff_idx = frame_length;
if (pRbd->status & FEC_RBD_LAST) {
NetReceive ((uchar*)rx_buff, frame_length);
net_process_received_packet((uchar *)rx_buff,
frame_length);
rx_buff_idx = 0;
}
}

@ -859,7 +859,7 @@ static int mpc5xxx_fec_recv(struct eth_device *dev)
*/
memcpy(buff, frame->head, 14);
memcpy(buff + 14, frame->data, frame_length);
NetReceive(buff, frame_length);
net_process_received_packet(buff, frame_length);
len = frame_length;
}
/*

@ -66,12 +66,12 @@ static int smi_reg_read(const char *devname, u8 phy_adr, u8 reg_ofs, u16 * data)
/* check parameters */
if (phy_adr > PHYADR_MASK) {
printf("Err..(%s) Invalid PHY address %d\n",
__FUNCTION__, phy_adr);
__func__, phy_adr);
return -EFAULT;
}
if (reg_ofs > PHYREG_MASK) {
printf("Err..(%s) Invalid register offset %d\n",
__FUNCTION__, reg_ofs);
__func__, reg_ofs);
return -EFAULT;
}
@ -81,7 +81,7 @@ static int smi_reg_read(const char *devname, u8 phy_adr, u8 reg_ofs, u16 * data)
/* read smi register */
smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
if (timeout-- == 0) {
printf("Err..(%s) SMI busy timeout\n", __FUNCTION__);
printf("Err..(%s) SMI busy timeout\n", __func__);
return -EFAULT;
}
} while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);
@ -102,7 +102,7 @@ static int smi_reg_read(const char *devname, u8 phy_adr, u8 reg_ofs, u16 * data)
smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
if (timeout-- == 0) {
printf("Err..(%s) SMI read ready timeout\n",
__FUNCTION__);
__func__);
return -EFAULT;
}
} while (!(smi_reg & MVGBE_PHY_SMI_READ_VALID_MASK));
@ -113,8 +113,8 @@ static int smi_reg_read(const char *devname, u8 phy_adr, u8 reg_ofs, u16 * data)
*data = (u16) (MVGBE_REG_RD(MVGBE_SMI_REG) & MVGBE_PHY_SMI_DATA_MASK);
debug("%s:(adr %d, off %d) value= %04x\n", __FUNCTION__, phy_adr,
reg_ofs, *data);
debug("%s:(adr %d, off %d) value= %04x\n", __func__, phy_adr, reg_ofs,
*data);
return 0;
}
@ -142,11 +142,11 @@ static int smi_reg_write(const char *devname, u8 phy_adr, u8 reg_ofs, u16 data)
/* check parameters */
if (phy_adr > PHYADR_MASK) {
printf("Err..(%s) Invalid phy address\n", __FUNCTION__);
printf("Err..(%s) Invalid phy address\n", __func__);
return -EINVAL;
}
if (reg_ofs > PHYREG_MASK) {
printf("Err..(%s) Invalid register offset\n", __FUNCTION__);
printf("Err..(%s) Invalid register offset\n", __func__);
return -EINVAL;
}
@ -156,7 +156,7 @@ static int smi_reg_write(const char *devname, u8 phy_adr, u8 reg_ofs, u16 data)
/* read smi register */
smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
if (timeout-- == 0) {
printf("Err..(%s) SMI busy timeout\n", __FUNCTION__);
printf("Err..(%s) SMI busy timeout\n", __func__);
return -ETIME;
}
} while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);
@ -583,7 +583,7 @@ static int mvgbe_send(struct eth_device *dev, void *dataptr, int datasize)
if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
(MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
printf("Err..(%s) in xmit packet\n", __FUNCTION__);
printf("Err..(%s) in xmit packet\n", __func__);
return -1;
}
cmd_sts = readl(&p_txdesc->cmd_sts);
@ -604,14 +604,14 @@ static int mvgbe_recv(struct eth_device *dev)
if (timeout < MVGBE_PHY_SMI_TIMEOUT)
timeout++;
else {
debug("%s time out...\n", __FUNCTION__);
debug("%s time out...\n", __func__);
return -1;
}
} while (readl(&p_rxdesc_curr->cmd_sts) & MVGBE_BUFFER_OWNED_BY_DMA);
if (p_rxdesc_curr->byte_cnt != 0) {
debug("%s: Received %d byte Packet @ 0x%x (cmd_sts= %08x)\n",
__FUNCTION__, (u32) p_rxdesc_curr->byte_cnt,
__func__, (u32) p_rxdesc_curr->byte_cnt,
(u32) p_rxdesc_curr->buf_ptr,
(u32) p_rxdesc_curr->cmd_sts);
}
@ -628,21 +628,24 @@ static int mvgbe_recv(struct eth_device *dev)
!= (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC)) {
printf("Err..(%s) Dropping packet spread on"
" multiple descriptors\n", __FUNCTION__);
" multiple descriptors\n", __func__);
} else if (cmd_sts & MVGBE_ERROR_SUMMARY) {
printf("Err..(%s) Dropping packet with errors\n",
__FUNCTION__);
__func__);
} else {
/* !!! call higher layer processing */
debug("%s: Sending Received packet to"
" upper layer (NetReceive)\n", __FUNCTION__);
" upper layer (net_process_received_packet)\n",
__func__);
/* let the upper layer handle the packet */
NetReceive((p_rxdesc_curr->buf_ptr + RX_BUF_OFFSET),
(int)(p_rxdesc_curr->byte_cnt - RX_BUF_OFFSET));
net_process_received_packet((p_rxdesc_curr->buf_ptr +
RX_BUF_OFFSET),
(int)(p_rxdesc_curr->byte_cnt -
RX_BUF_OFFSET));
}
/*
* free these descriptors and point next in the ring
@ -747,7 +750,7 @@ error2:
free(dmvgbe);
error1:
printf("Err.. %s Failed to allocate memory\n",
__FUNCTION__);
__func__);
return -1;
}
@ -767,7 +770,7 @@ error1:
#endif
default: /* this should never happen */
printf("Err..(%s) Invalid device number %d\n",
__FUNCTION__, devnum);
__func__, devnum);
return -1;
}

@ -1572,7 +1572,7 @@ static int mvneta_recv(struct eth_device *dev)
* No cache invalidation needed here, since the rx_buffer's are
* located in a uncached memory region
*/
NetReceive(data, rx_bytes);
net_process_received_packet(data, rx_bytes);
}
/* Update rxq management counters */

@ -841,7 +841,8 @@ natsemi_poll(struct eth_device *dev)
rx_status);
retstat = 0;
} else { /* give packet to higher level routine */
NetReceive((rxb + cur_rx * RX_BUF_SIZE), length);
net_process_received_packet((rxb + cur_rx * RX_BUF_SIZE),
length);
retstat = 1;
}

@ -665,7 +665,7 @@ void uboot_push_packet_len(int len) {
dp83902a_recv(&pbuf[0], len);
/*Just pass it to the upper layer*/
NetReceive(&pbuf[0], len);
net_process_received_packet(&pbuf[0], len);
}
void uboot_push_tx_done(int key, int val) {

@ -809,11 +809,13 @@ ns8382x_poll(struct eth_device *dev)
if ((rx_status & (DescMore | DescPktOK | DescRxLong)) != DescPktOK) {
/* corrupted packet received */
printf("ns8382x_poll: Corrupted packet, status:%lx\n", rx_status);
printf("ns8382x_poll: Corrupted packet, status:%lx\n",
rx_status);
retstat = 0;
} else {
/* give packet to higher level routine */
NetReceive((rxb + cur_rx * RX_BUF_SIZE), length);
net_process_received_packet((rxb + cur_rx * RX_BUF_SIZE),
length);
retstat = 1;
}

@ -297,7 +297,7 @@ static int pch_gbe_recv(struct eth_device *dev)
buffer_addr = pci_mem_to_phys(priv->bdf, rx_desc->buffer_addr);
length = rx_desc->rx_words_eob - 3 - ETH_FCS_LEN;
NetReceive((uchar *)buffer_addr, length);
net_process_received_packet((uchar *)buffer_addr, length);
/* Test the wrap-around condition */
if (++priv->rx_idx >= PCH_GBE_DESC_NUM)

@ -507,7 +507,7 @@ static int pcnet_recv (struct eth_device *dev)
buf = (*lp->rx_buf)[lp->cur_rx];
invalidate_dcache_range((unsigned long)buf,
(unsigned long)buf + pkt_len);
NetReceive(buf, pkt_len);
net_process_received_packet(buf, pkt_len);
PCNET_DEBUG2("Rx%d: %d bytes from 0x%p\n",
lp->cur_rx, pkt_len, buf);
}

@ -504,11 +504,11 @@ static int rtl_poll(struct eth_device *dev)
memcpy(rxdata, rx_ring + ring_offs + 4, semi_count);
memcpy(&(rxdata[semi_count]), rx_ring, rx_size-4-semi_count);
NetReceive(rxdata, length);
net_process_received_packet(rxdata, length);
debug_cond(DEBUG_RX, "rx packet %d+%d bytes",
semi_count, rx_size-4-semi_count);
} else {
NetReceive(rx_ring + ring_offs + 4, length);
net_process_received_packet(rx_ring + ring_offs + 4, length);
debug_cond(DEBUG_RX, "rx packet %d bytes", rx_size-4);
}
flush_cache((unsigned long)rx_ring, RX_BUF_LEN);

@ -538,7 +538,7 @@ static int rtl_recv(struct eth_device *dev)
cpu_to_le32(bus_to_phys(tpc->RxBufferRing[cur_rx]));
rtl_flush_rx_desc(&tpc->RxDescArray[cur_rx]);
NetReceive(rxdata, length);
net_process_received_packet(rxdata, length);
} else {
puts("Error Rx");
}

@ -127,7 +127,7 @@ int sh_eth_recv(struct eth_device *dev)
packet = (uchar *)
ADDR_TO_P2(port_info->rx_desc_cur->rd2);
invalidate_cache(packet, len);
NetReceive(packet, len);
net_process_received_packet(packet, len);
}
/* Make current descriptor available again */

@ -756,35 +756,35 @@ static int smc_rcv(struct eth_device *dev)
#ifdef USE_32_BIT
PRINTK3(" Reading %d dwords (and %d bytes) \n",
PRINTK3(" Reading %d dwords (and %d bytes)\n",
packet_length >> 2, packet_length & 3 );
/* QUESTION: Like in the TX routine, do I want
to send the DWORDs or the bytes first, or some
mixture. A mixture might improve already slow PIO
performance */
SMC_insl( dev, SMC91111_DATA_REG, NetRxPackets[0],
packet_length >> 2 );
SMC_insl(dev, SMC91111_DATA_REG, net_rx_packets[0],
packet_length >> 2);
/* read the left over bytes */
if (packet_length & 3) {
int i;
byte *tail = (byte *)(NetRxPackets[0] +
byte *tail = (byte *)(net_rx_packets[0] +
(packet_length & ~3));
dword leftover = SMC_inl(dev, SMC91111_DATA_REG);
for (i=0; i<(packet_length & 3); i++)
*tail++ = (byte) (leftover >> (8*i)) & 0xff;
}
#else
PRINTK3(" Reading %d words and %d byte(s) \n",
PRINTK3(" Reading %d words and %d byte(s)\n",
(packet_length >> 1 ), packet_length & 1 );
SMC_insw(dev, SMC91111_DATA_REG , NetRxPackets[0],
packet_length >> 1);
SMC_insw(dev, SMC91111_DATA_REG , net_rx_packets[0],
packet_length >> 1);
#endif /* USE_32_BIT */
#if SMC_DEBUG > 2
printf("Receiving Packet\n");
print_packet( NetRxPackets[0], packet_length );
print_packet(net_rx_packets[0], packet_length);
#endif
} else {
/* error ... */
@ -815,7 +815,7 @@ static int smc_rcv(struct eth_device *dev)
if (!is_error) {
/* Pass the packet up to the protocol layers. */
NetReceive(NetRxPackets[0], packet_length);
net_process_received_packet(net_rx_packets[0], packet_length);
return packet_length;
} else {
return 0;

@ -192,7 +192,7 @@ static void smc911x_halt(struct eth_device *dev)
static int smc911x_rx(struct eth_device *dev)
{
u32 *data = (u32 *)NetRxPackets[0];
u32 *data = (u32 *)net_rx_packets[0];
u32 pktlen, tmplen;
u32 status;
@ -211,7 +211,7 @@ static int smc911x_rx(struct eth_device *dev)
": dropped bad packet. Status: 0x%08x\n",
status);
else
NetReceive(NetRxPackets[0], pktlen);
net_process_received_packet(net_rx_packets[0], pktlen);
}
return 0;

@ -437,10 +437,10 @@ static int sunxi_emac_eth_recv(struct eth_device *dev)
printf("Received packet is too big (len=%d)\n", rx_len);
} else {
emac_inblk_32bit((void *)&regs->rx_io_data,
NetRxPackets[0], rx_len);
net_rx_packets[0], rx_len);
/* Pass to upper layer */
NetReceive(NetRxPackets[0], rx_len);
net_process_received_packet(net_rx_packets[0], rx_len);
return rx_len;
}
}

@ -287,7 +287,7 @@ void redundant_init(struct eth_device *dev)
}
}
if (!memcmp(pkt, (void *)NetRxPackets[rx_idx], sizeof(pkt)))
if (!memcmp(pkt, (void *)net_rx_packets[rx_idx], sizeof(pkt)))
fail = 0;
out_be16(&rxbd[rx_idx].length, 0);
@ -343,7 +343,7 @@ static void startup_tsec(struct eth_device *dev)
for (i = 0; i < PKTBUFSRX; i++) {
out_be16(&rxbd[i].status, RXBD_EMPTY);
out_be16(&rxbd[i].length, 0);
out_be32(&rxbd[i].bufptr, (u32)NetRxPackets[i]);
out_be32(&rxbd[i].bufptr, (u32)net_rx_packets[i]);
}
status = in_be16(&rxbd[PKTBUFSRX - 1].status);
out_be16(&rxbd[PKTBUFSRX - 1].status, status | RXBD_WRAP);
@ -430,7 +430,8 @@ static int tsec_recv(struct eth_device *dev)
/* Send the packet up if there were no errors */
if (!(status & RXBD_STATS))
NetReceive(NetRxPackets[rx_idx], length - 4);
net_process_received_packet(net_rx_packets[rx_idx],
length - 4);
else
printf("Got error %x\n", (status & RXBD_STATS));

@ -804,11 +804,11 @@ static int tsi108_eth_probe (struct eth_device *dev, bd_t * bis)
rx_descr_current = rx_descr;
for (index = 0; index < NUM_RX_DESC; index++) {
/* make sure the receive buffers are not in cache */
invalidate_dcache_range((unsigned long)NetRxPackets[index],
(unsigned long)NetRxPackets[index] +
invalidate_dcache_range((unsigned long)net_rx_packets[index],
(unsigned long)net_rx_packets[index] +
RX_BUFFER_SIZE);
rx_descr->start_addr0 =
cpu_to_le32((vuint32) NetRxPackets[index]);
cpu_to_le32((vuint32) net_rx_packets[index]);
rx_descr->start_addr1 = 0;
rx_descr->next_descr_addr0 =
cpu_to_le32((vuint32) (rx_descr + 1));
@ -966,7 +966,7 @@ static int tsi108_eth_recv (struct eth_device *dev)
/*** process packet ***/
buffer = (uchar *)(le32_to_cpu(rx_descr->start_addr0));
NetReceive(buffer, length);
net_process_received_packet(buffer, length);
invalidate_dcache_range ((unsigned long)buffer,
(unsigned long)buffer +

@ -587,7 +587,8 @@ static int uli526x_rx_packet(struct eth_device *dev)
__FUNCTION__, i, rxptr->rx_buf_ptr[i]);
#endif
NetReceive((uchar *)rxptr->rx_buf_ptr, rxlen);
net_process_received_packet(
(uchar *)rxptr->rx_buf_ptr, rxlen);
uli526x_reuse_buf(rxptr);
} else {
@ -709,7 +710,7 @@ static void allocate_rx_buffer(struct uli526x_board_info *db)
u32 addr;
for (index = 0; index < RX_DESC_CNT; index++) {
addr = (u32)NetRxPackets[index];
addr = (u32)net_rx_packets[index];
addr += (16 - (addr & 15));
rxptr->rx_buf_ptr = (char *) addr;
rxptr->rdes2 = cpu_to_le32(addr);

@ -556,7 +556,7 @@ static int axiemac_recv(struct eth_device *dev)
#endif
/* Pass the received frame up for processing */
if (length)
NetReceive(rxframe, length);
net_process_received_packet(rxframe, length);
#ifdef DEBUG
/* It is useful to clear buffer to be sure that it is consistent */

@ -322,7 +322,7 @@ static int emaclite_recv(struct eth_device *dev)
out_be32 (baseaddress + XEL_RSR_OFFSET, reg);
debug("Packet receive from 0x%x, length %dB\n", baseaddress, length);
NetReceive((uchar *) etherrxbuff, length);
net_process_received_packet((uchar *)etherrxbuff, length);
return length;
}

@ -48,7 +48,7 @@ int ll_temac_reset_fifo(struct eth_device *dev)
int ll_temac_recv_fifo(struct eth_device *dev)
{
int i, length = 0;
u32 *buf = (u32 *)NetRxPackets[0];
u32 *buf = (u32 *)net_rx_packets[0];
struct ll_temac *ll_temac = dev->priv;
struct fifo_ctrl *fifo_ctrl = (void *)ll_temac->ctrladdr;
@ -93,7 +93,7 @@ int ll_temac_recv_fifo(struct eth_device *dev)
for (i = 0; i < length; i += 4)
*buf++ = in_be32(&fifo_ctrl->rdfd);
NetReceive(NetRxPackets[0], length);
net_process_received_packet(net_rx_packets[0], length);
}
return 0;

@ -180,7 +180,7 @@ int ll_temac_init_sdma(struct eth_device *dev)
memset(rx_dp, 0, sizeof(*rx_dp));
rx_dp->next_p = rx_dp;
rx_dp->buf_len = PKTSIZE_ALIGN;
rx_dp->phys_buf_p = (u8 *)NetRxPackets[i];
rx_dp->phys_buf_p = (u8 *)net_rx_packets[i];
flush_cache((u32)rx_dp->phys_buf_p, PKTSIZE_ALIGN);
}
flush_cache((u32)cdmac_bd.rx, sizeof(cdmac_bd.rx));
@ -316,7 +316,7 @@ int ll_temac_recv_sdma(struct eth_device *dev)
ll_temac->out32(ra[RX_TAILDESC_PTR], (int)&cdmac_bd.rx[rx_idx]);
if (length > 0 && pb_idx != -1)
NetReceive(NetRxPackets[pb_idx], length);
net_process_received_packet(net_rx_packets[pb_idx], length);
return 0;
}

@ -439,7 +439,7 @@ static int zynq_gem_recv(struct eth_device *dev)
u32 size = roundup(frame_len, ARCH_DMA_MINALIGN);
invalidate_dcache_range(addr, addr + size);
NetReceive((u8 *)addr, frame_len);
net_process_received_packet((u8 *)addr, frame_len);
if (current_bd->status & ZYNQ_GEM_RXBUF_SOF_MASK)
priv->rx_first_buf = priv->rxbd_current;

@ -1333,7 +1333,7 @@ static int uec_recv(struct eth_device* dev)
if (!(status & RxBD_ERROR)) {
data = BD_DATA(bd);
len = BD_LENGTH(bd);
NetReceive(data, len);
net_process_received_packet(data, len);
} else {
printf("%s: Rx error\n", dev->name);
}

@ -534,7 +534,8 @@ static int asix_recv(struct eth_device *eth)
}
/* Notify net stack */
NetReceive(buf_ptr + sizeof(packet_len), packet_len);
net_process_received_packet(buf_ptr + sizeof(packet_len),
packet_len);
/* Adjust for next iteration. Packets are padded to 16-bits */
if (packet_len & 1)

@ -558,7 +558,7 @@ static int asix_recv(struct eth_device *eth)
frame_pos += 2;
NetReceive(recv_buf + frame_pos, pkt_len);
net_process_received_packet(recv_buf + frame_pos, pkt_len);
pkt_hdr++;
frame_pos += ((pkt_len + 7) & 0xFFF8)-2;

@ -600,7 +600,7 @@ static int mcs7830_recv(struct eth_device *eth)
if (sts == STAT_RX_FRAME_CORRECT) {
debug("%s() got a frame, len=%d\n", __func__, gotlen);
NetReceive(buf, gotlen);
net_process_received_packet(buf, gotlen);
return 0;
}

@ -760,7 +760,8 @@ static int smsc95xx_recv(struct eth_device *eth)
}
/* Notify net stack */
NetReceive(buf_ptr + sizeof(packet_len), packet_len - 4);
net_process_received_packet(buf_ptr + sizeof(packet_len),
packet_len - 4);
/* Adjust for next iteration */
actual_len -= sizeof(packet_len) + packet_len;

@ -1522,7 +1522,7 @@ static int rx_submit(struct eth_dev *dev, struct usb_request *req,
* RNDIS headers involve variable numbers of LE32 values.
*/
req->buf = (u8 *) NetRxPackets[0];
req->buf = (u8 *)net_rx_packets[0];
req->length = size;
req->complete = rx_complete;
@ -2446,7 +2446,8 @@ static int usb_eth_recv(struct eth_device *netdev)
if (packet_received) {
debug("%s: packet received\n", __func__);
if (dev->rx_req) {
NetReceive(NetRxPackets[0], dev->rx_req->length);
net_process_received_packet(net_rx_packets[0],
dev->rx_req->length);
packet_received = 0;
rx_submit(dev, dev->rx_req, 0);

@ -482,11 +482,7 @@ extern u8 net_server_ethaddr[6]; /* Boot server enet address */
extern struct in_addr net_ip; /* Our IP addr (0 = unknown) */
extern struct in_addr net_server_ip; /* Server IP addr (0 = unknown) */
extern uchar *net_tx_packet; /* THE transmit packet */
#ifdef CONFIG_DM_ETH
extern uchar *net_rx_packets[PKTBUFSRX]; /* Receive packets */
#else
extern uchar *NetRxPackets[PKTBUFSRX]; /* Receive packets */
#endif
extern uchar *net_rx_packet; /* Current receive packet */
extern int net_rx_packet_len; /* Current rx packet length */
extern unsigned NetIPID; /* IP ID (counting) */
@ -640,9 +636,6 @@ static inline void net_send_packet(uchar *pkt, int len)
int net_send_udp_packet(uchar *ether, struct in_addr dest, int dport,
int sport, int payload_len);
#ifndef CONFIG_DM_ETH
#define NetReceive(in_packet, len) net_process_received_packet(in_packet, len)
#endif
/* Processes a received packet */
void net_process_received_packet(uchar *in_packet, int len);

@ -178,13 +178,8 @@ int NetTimeOffset;
#endif
static uchar net_pkt_buf[(PKTBUFSRX+1) * PKTSIZE_ALIGN + PKTALIGN];
#ifdef CONFIG_DM_ETH
/* Receive packets */
uchar *net_rx_packets[PKTBUFSRX];
#else
/* Receive packet */
uchar *NetRxPackets[PKTBUFSRX];
#endif
/* Current UDP RX packet handler */
static rxhand_f *udp_packet_handler;
/* Current ARP RX packet handler */
@ -303,16 +298,10 @@ void net_init(void)
net_tx_packet = &net_pkt_buf[0] + (PKTALIGN - 1);
net_tx_packet -= (ulong)net_tx_packet % PKTALIGN;
#ifdef CONFIG_DM_ETH
for (i = 0; i < PKTBUFSRX; i++) {
net_rx_packets[i] = net_tx_packet +
(i + 1) * PKTSIZE_ALIGN;
}
#else
for (i = 0; i < PKTBUFSRX; i++)
NetRxPackets[i] = net_tx_packet +
(i + 1) * PKTSIZE_ALIGN;
#endif
ArpInit();
net_clear_handlers();
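
As a side note, the buffer carving net_init() now does unconditionally (one PKTALIGN-aligned transmit buffer followed by PKTBUFSRX receive buffers spaced PKTSIZE_ALIGN apart, all inside the static net_pkt_buf[]) can be reproduced in a small standalone program. The sizes below are illustrative placeholders, not U-Boot's real configuration values; the arithmetic mirrors the hunk above.

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only -- U-Boot derives the real ones from its config. */
#define PKTBUFSRX	4
#define PKTSIZE_ALIGN	1536
#define PKTALIGN	32

static unsigned char net_pkt_buf[(PKTBUFSRX + 1) * PKTSIZE_ALIGN + PKTALIGN];
static unsigned char *net_rx_packets[PKTBUFSRX];
static unsigned char *net_tx_packet;

int main(void)
{
	int i;

	/* Align the TX buffer to PKTALIGN, as net_init() does. */
	net_tx_packet = &net_pkt_buf[0] + (PKTALIGN - 1);
	net_tx_packet -= (uintptr_t)net_tx_packet % PKTALIGN;

	/* Each RX buffer sits one PKTSIZE_ALIGN stride after the previous. */
	for (i = 0; i < PKTBUFSRX; i++)
		net_rx_packets[i] = net_tx_packet + (i + 1) * PKTSIZE_ALIGN;

	for (i = 0; i < PKTBUFSRX; i++)
		printf("net_rx_packets[%d] offset into net_pkt_buf: %td\n",
		       i, net_rx_packets[i] - &net_pkt_buf[0]);

	return 0;
}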

@ -212,7 +212,7 @@ static void scc_init (int scc_index)
for (i = 0; i < PKTBUFSRX; i++) {
rtx->rxbd[i].cbd_sc = BD_ENET_RX_EMPTY;
rtx->rxbd[i].cbd_datlen = 0; /* Reset */
rtx->rxbd[i].cbd_bufaddr = (uint) NetRxPackets[i];
rtx->rxbd[i].cbd_bufaddr = (uint) net_rx_packets[i];
}
rtx->rxbd[PKTBUFSRX - 1].cbd_sc |= BD_ENET_RX_WRAP;
@ -405,8 +405,8 @@ static int scc_recv (int index, void *packet, int max_length)
if (!(rtx->rxbd[rxIdx].cbd_sc & 0x003f)) {
length = rtx->rxbd[rxIdx].cbd_datlen - 4;
memcpy (packet,
(void *) (NetRxPackets[rxIdx]),
length < max_length ? length : max_length);
(void *)(net_rx_packets[rxIdx]),
length < max_length ? length : max_length);
}
/* Give the buffer back to the SCC. */
