绑定完请刷新页面
取消
刷新

分享好友

×
取消 复制
改写drivers/net/8139too.c(2)
2020-06-23 16:15:11

/*
 * Program the hardware receive filter: always accept broadcast and our
 * own MAC; hash every address on dev->mc_list into the 64-bit MAR
 * multicast filter.  Caller must hold tp->lock (see
 * xc_set_multicast_list).
 */
static void __set_rx_mode (struct net_device *dev)
{
struct xc_priv *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->ioaddr;
u32 mc_filter[2]; /* Multicast hash filter (MAR0..MAR7 register pair) */
int i, rx_mode;
u32 tmp;

struct dev_mc_list *mclist;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
/* Top 6 bits of the Ethernet CRC select one of the 64 filter bits. */
for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
i++, mclist = mclist->next) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
}

/* We can safely update without stopping the chip. */
tmp = xc_rx_config | rx_mode;
if (tp->rx_config != tmp) {
iowrite32(tmp,ioaddr+RxConfig);
ioread32(ioaddr+RxConfig); /* read back to flush the posted write */
tp->rx_config = tmp;
}
iowrite32(mc_filter[0],ioaddr+MAR0 + 0);
iowrite32(mc_filter[1],ioaddr+MAR0 + 4);
}

/*
 * net_device set_multicast_list hook: reprogram the RX filter under
 * tp->lock so the update cannot race with the interrupt path.
 */
static void xc_set_multicast_list(struct net_device *dev)
{
	struct xc_priv *priv = netdev_priv(dev);
	unsigned long irq_flags;

	spin_lock_irqsave(&priv->lock, irq_flags);
	__set_rx_mode(dev);
	spin_unlock_irqrestore(&priv->lock, irq_flags);
}

/*
 * Soft-reset the chip and poll (up to 1000 tries, 10us apart) until the
 * hardware clears the CmdReset bit.  Times out silently if it never does.
 */
static void xc_reset(void __iomem *ioaddr)
{
	int tries;

	iowrite8(CmdReset, ioaddr + ChipCmd);
	for (tries = 0; tries < 1000; tries++) {
		barrier();
		if (!(ioread8(ioaddr + ChipCmd) & CmdReset))
			return;	/* reset completed */
		udelay(10);
	}
}

/*
 * Tear down a device: unmap the BAR, release PCI regions, clear the PCI
 * drvdata and free the net_device.
 *
 * Fixes vs. the original: (1) dev was dereferenced via netdev_priv()
 * before being checked for NULL; (2) pci_set_drvdata() was called after
 * free_netdev() had already freed the memory holding tp — a
 * use-after-free.  drvdata is now cleared while tp->pdev is still valid.
 */
static void xc_cleanup_dev(struct net_device *dev)
{
	struct xc_priv *tp;

	if (dev == NULL)
		return;
	tp = netdev_priv(dev);
	if (tp->pdev == NULL)
		return;
	if (tp->ioaddr)
		pci_iounmap(tp->pdev, tp->ioaddr);
	pci_release_regions(tp->pdev);
	pci_set_drvdata(tp->pdev, NULL);
	free_netdev(dev);	/* frees tp as well: must be last */
}

/*
 * Drain up to @budget packets from the chip's single circular RX buffer.
 * Each packet is preceded by a 32-bit little-endian header whose high 16
 * bits carry the total length including the 4-byte CRC (the low half is
 * presumably the RX status — not examined here).  Returns the number of
 * packets handed to the stack.  Called from xc_poll() under tp->rx_lock.
 */
static int xc_rx(struct net_device *dev, struct xc_priv *tp,int budget)
{
u16 status;
struct sk_buff *skb;
int packet_size,data_size;
int work_done = 0;
void __iomem *ioaddr = tp->ioaddr;
unsigned long cur_rx = tp->cur_rx;

/* RxBufEmpty is clear while the chip still has queued packets. */
while(netif_running(dev) && work_done < budget && ((ioread8(ioaddr+ChipCmd) & RxBufEmpty) == 0))
{
u32 tmp_size;
u32 offset = cur_rx % RX_BUF_LEN;
rmb(); /* read the header only after the chip's DMA write is visible */
tmp_size = le32_to_cpu(*(u32 *)(tp->rx_bufs+offset));
packet_size = tmp_size >> 16; /* frame length, CRC included */
data_size = packet_size - 4;  /* payload without the trailing CRC */
skb = dev_alloc_skb(data_size + 2);
if(likely(skb))
{
skb->dev = dev;
skb_reserve(skb,2); /* align the IP header */
eth_copy_and_sum (skb, &tp->rx_bufs[offset + 4],data_size,0);
skb_put(skb,data_size);
skb->protocol = eth_type_trans (skb, dev);
dev->last_rx = jiffies;
tp->stats.rx_bytes += data_size;
tp->stats.rx_packets++;
netif_receive_skb (skb);
}
else
tp->stats.rx_dropped++; /* allocation failed: count and skip the packet */
++work_done;
/* Advance past header + frame, rounded up to a 4-byte boundary. */
cur_rx = (cur_rx + packet_size + 4 + 3) & ~3;
/* Update the chip's read pointer; the -16 bias follows the 8139
   convention — TODO confirm against the datasheet. */
iowrite16((u16)(cur_rx - 16),ioaddr+RxBufPtr);

status = ioread16(ioaddr+IntrStatus) & RxAckBits;
/* Clear out errors and receive interrupts */
if (likely(status != 0))
{
iowrite16 (RxAckBits,ioaddr+IntrStatus);
ioread16(ioaddr+IntrStatus); /* flush the posted write */
}
}
tp->cur_rx = cur_rx;
return work_done;
}

/*
 * Old-style NAPI poll callback (pre-2.6.24 interface using *budget and
 * dev->quota).  Drains the RX ring up to the allowed quota; when the
 * ring empties before the quota is spent, re-enables interrupts and
 * leaves polling mode.  Returns 0 when done, nonzero to be polled again.
 */
static int xc_poll(struct net_device *dev, int *budget)
{
struct xc_priv *tp = netdev_priv(dev);
int mbudget = min(*budget, dev->quota);
int work_done = 0;
void __iomem *ioaddr = tp->ioaddr;

if(mbudget <= 0)
return -1;
spin_lock(&tp->rx_lock);
if(likely(ioread16(ioaddr+IntrStatus) & RxAckBits))
{
work_done = xc_rx(dev,tp,mbudget);
*budget -= work_done;
dev->quota -= work_done;
}
/* Ring drained before the quota ran out: complete the poll. */
if(mbudget > work_done)
{
/* Interrupts stay off between unmasking and __netif_rx_complete so
   the irq handler cannot re-schedule us in the gap. */
local_irq_disable();
iowrite16(xc_intr_mask,ioaddr+IntrMask); /* re-enable all interrupts */
__netif_rx_complete(dev);
local_irq_enable();
}
spin_unlock(&tp->rx_lock);
return !(work_done < mbudget);
}

/*
 * hard_start_xmit hook: copy the skb into the current per-descriptor
 * bounce buffer and start transmission by writing flags+length to the
 * descriptor's TX status register.  Frames that do not fit in the
 * 1536-byte buffer are dropped.  Always returns 0; the skb is consumed
 * on every path.
 */
static int xc_tx(struct sk_buff *skb, struct net_device *dev)
{
struct xc_priv *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->ioaddr;
unsigned int entry = tp->cur_tx % 4; /* four descriptors used round-robin */
unsigned int len = skb->len;
if(skb->len < 1536)/* fits the fixed-size TX bounce buffer */
{
/* Zero-pad runt frames up to the Ethernet minimum before copying. */
if (len < ETH_ZLEN)
memset(tp->tx_buf[entry], 0, ETH_ZLEN);
skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
dev_kfree_skb(skb);
}
else
{
dev_kfree_skb(skb);/* oversized: free the skb ... */
tp->stats.tx_dropped++;/* ... and account the drop */
return 0;
}
spin_lock_irq(&tp->lock);
/* Writing size+flags to the status register kicks off the transmit. */
iowrite32(tp->tx_flag | max(len, (unsigned int)ETH_ZLEN),
ioaddr+TxStatus0 + (entry * sizeof (u32)));
dev->trans_start = jiffies;/* record start time for TX-timeout detection */
tp->cur_tx++;/* advance to the descriptor the next transmit will use */
wmb();
/* All four descriptors now in flight: stop the queue until
   tx_interrupt() reclaims one. */
if(tp->cur_tx - 4 == tp->dirty_tx)
netif_stop_queue(dev);
spin_unlock_irq(&tp->lock);
return 0;
}

/*
 * Reclaim completed TX descriptors.  Walks from the oldest outstanding
 * descriptor toward cur_tx, stopping at the first one the chip has not
 * finished (no OK/underrun/abort status yet).  Wakes the queue if any
 * descriptor was reclaimed.  Called from the irq handler under tp->lock.
 */
static void tx_interrupt (struct net_device *dev,
struct xc_priv *tp,void __iomem *ioaddr)
{
	unsigned long done;

	if (dev == NULL || ioaddr == NULL)
		return;

	for (done = tp->dirty_tx; done != tp->cur_tx; done++) {
		int status = ioread32(ioaddr + TxStatus0 +
				      ((done % 4) * sizeof(u32)));
		/* No completion bits yet: this one is still being sent. */
		if (!(status & (TxStatOK | TxUnderrun | TxAborted)))
			break;
	}

	if (done != tp->dirty_tx) {
		tp->dirty_tx = done;
		mb();	/* publish dirty_tx before waking the transmit path */
		netif_wake_queue(dev);
	}
}

/*
 * Shared-line interrupt handler.  Non-RX sources are acknowledged here;
 * RX work is deferred to xc_poll() via the old netif_rx_schedule() API.
 *
 * Fixes vs. the original: (1) the "not our interrupt" test applied
 * unlikely() to the masked status value and compared the hint's result
 * with 0, hinting the *common* case as unlikely — the hint now wraps the
 * whole comparison; (2) RX interrupts were "masked" for the polling
 * window by writing xc_norx_intr_mask to IntrStatus, which only acks
 * bits — the write now goes to IntrMask, matching the IntrMask write
 * xc_poll() uses to re-enable them.
 */
static irqreturn_t xc_interrupt(int irq, void *dev_inst,struct pt_regs *regs)
{
	u16 status, ackstat;
	struct net_device *dev = (struct net_device *)dev_inst;
	struct xc_priv *tp = netdev_priv(dev);
	int handle = 0;
	int link_changed = 0;	/* set on RxUnderrun; presumably consumed by
				   media-handling code not visible here */
	void __iomem *ioaddr = tp->ioaddr;

	spin_lock(&tp->lock);
	status = ioread16(ioaddr+IntrStatus);
	/* Shared IRQ line: nothing of ours is pending. */
	if (unlikely((status & xc_intr_mask) == 0)) goto out;
	handle = 1;
	/* All-ones read: card was hot-removed. */
	if (unlikely(status == 0xFFFF)) goto out;
	if (unlikely(!netif_running(dev))) {iowrite16(0,ioaddr+IntrMask);goto out;}
	if (unlikely(status & RxUnderrun))
		link_changed = ioread16(ioaddr+CSCR) & 0x0800;
	/* Ack everything except RX bits (acked in xc_rx) and TxErr (below). */
	ackstat = status & ~(RxAckBits | TxErr);
	if (ackstat) iowrite16(ackstat,ioaddr+IntrStatus);
	if (status & RxAckBits)
	{
		if (netif_rx_schedule_prep(dev))
		{
			/* Mask RX sources while the poll routine runs;
			   xc_poll() restores the full mask via IntrMask. */
			iowrite16(xc_norx_intr_mask,ioaddr+IntrMask);
			__netif_rx_schedule(dev);
		}
	}
	if (status & (TxOK | TxErr))
	{
		tx_interrupt(dev, tp, ioaddr);
		if (status & TxErr)
			iowrite16(TxErr,ioaddr+IntrStatus);
	}
out:
	spin_unlock (&tp->lock);
	return IRQ_RETVAL(handle);
}

/*
 * Bring the hardware to an operational state: reset, restore the MAC
 * address, configure RX/TX, program DMA buffer addresses, set the RX
 * filter and finally unmask interrupts.
 *
 * Fix vs. the original: the TxAddr programming loop read tp->tx_buf
 * without the [i] index, so every TxAddr slot was given the same buffer
 * offset (tx_buf is the per-descriptor pointer array — see its indexed
 * use in xc_tx).
 */
static void xc_hw_start(struct net_device *dev)
{
	int i;
	u8 tmp;
	struct xc_priv *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->ioaddr;

	/* Wake the clock before reset on chips that support halting it. */
	if (rtl_chip_info[tp->chipset].flags & HasHltClk)
		iowrite8('R', ioaddr + HltClk);
	xc_reset(ioaddr);

	/* Unlock config registers, restore the MAC, enable RX/TX engines. */
	iowrite8(Cfg9346_Unlock, ioaddr + Cfg9346);
	iowrite32(cpu_to_le32(*(u32*)(dev->dev_addr+0)), ioaddr + MAC0 + 0);
	iowrite32(cpu_to_le32(*(u32*)(dev->dev_addr+4)), ioaddr + MAC0 + 4);
	iowrite8(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
	tp->rx_config = xc_rx_config | AcceptBroadcast | AcceptMyPhys;
	iowrite32(tp->rx_config, ioaddr + RxConfig);
	/* NOTE(review): the RX config constant is also written to TxConfig —
	   looks suspicious; confirm whether a dedicated TX config value was
	   intended (left unchanged here). */
	iowrite32(xc_rx_config, ioaddr + TxConfig);
	tp->cur_rx = 0;
	xc_check_media(dev, 1);
	if (tp->chipset >= CH_8139B)
		iowrite8(ioread8(ioaddr + Config3) & ~Cfg3_Magic, ioaddr + Config3);
	iowrite8(Cfg9346_lock, ioaddr + Cfg9346);

	/* Program DMA addresses for the RX ring and the four TX buffers. */
	iowrite32(tp->rx_bufs_dma, ioaddr + RxBuf);
	ioread32(ioaddr + RxBuf);	/* flush posted write */
	for (i = 0; i < 4; i++)
	{
		iowrite32(tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs),
		ioaddr + TxAddr0 + (i * 4));
		ioread32(ioaddr + TxAddr0 + (i * 4));
	}
	iowrite32(0, ioaddr + RxMissed);
	xc_set_multicast_list(dev);
	/* Keep only the high (reserved) bits of the multi-interrupt register. */
	iowrite16(ioread16(ioaddr + MultiIntr) & 0xF000, ioaddr + MultiIntr);
	/* Re-enable RX/TX if the config sequence cleared either bit. */
	tmp = ioread8(ioaddr + ChipCmd);
	if (!(tmp & CmdRxEnb) || !(tmp & CmdTxEnb))
		iowrite8(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
	iowrite16(xc_intr_mask, ioaddr + IntrMask);
}

/*
 * net_device open hook: request the (shared) IRQ, allocate the
 * DMA-coherent RX and TX buffers, initialise ring state and start the
 * hardware.  Returns 0 on success or a negative errno.
 *
 * Fixes vs. the original: a stray double semicolon, and the TX buffer
 * setup loop assigned tp->tx_buf instead of tp->tx_buf[i], leaving all
 * four descriptor slots pointing at the last 1536-byte buffer.
 */
static int xc_open(struct net_device *dev)
{
	int rc, i;
	struct xc_priv *tp = netdev_priv(dev);

	rc = request_irq(dev->irq, xc_interrupt, SA_SHIRQ, dev->name, dev);
	if (rc)
		return rc;
	tp->tx_bufs = pci_alloc_consistent(tp->pdev, 1536*4, &tp->tx_bufs_dma);
	tp->rx_bufs = pci_alloc_consistent(tp->pdev, RX_BUF_TOT_LEN, &tp->rx_bufs_dma);
	if (tp->rx_bufs == NULL || tp->tx_bufs == NULL)
	{
		/* Partial failure: release whatever was acquired. */
		free_irq(dev->irq, dev);
		if (tp->tx_bufs)
			pci_free_consistent(tp->pdev, 1536*4, tp->tx_bufs, tp->tx_bufs_dma);
		if (tp->rx_bufs)
			pci_free_consistent(tp->pdev, RX_BUF_TOT_LEN, tp->rx_bufs, tp->rx_bufs_dma);
		return -ENOMEM;
	}
	tp->mii.full_duplex = tp->mii.force_media;
	tp->tx_flag = (256 << 11) & 0x003f0000;	/* early-TX threshold field */
	tp->cur_rx = 0;
	tp->cur_tx = 0;
	tp->dirty_tx = 0;
	/* Carve the coherent TX area into four per-descriptor buffers. */
	for (i = 0; i < 4; i++)
		tp->tx_buf[i] = &tp->tx_bufs[i * 1536];
	xc_hw_start(dev);
	netif_start_queue(dev);
	return 0;
}

/*
 * net_device stop hook: quiesce the chip, detach the IRQ, free the
 * DMA-coherent buffers and put the chip into low-power mode.
 * Always returns 0.
 */
static int xc_stop(struct net_device *dev)
{
unsigned long flags;
struct xc_priv *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->ioaddr;
netif_stop_queue(dev);

/* Stop the RX/TX engines and mask all interrupt sources. */
spin_lock_irqsave(&tp->lock,flags);
iowrite8(0,ioaddr+ChipCmd);
iowrite16(0,ioaddr+IntrMask);
/* Fold the hardware missed-packet counter into stats, then clear it. */
tp->stats.rx_missed_errors += ioread32(ioaddr+RxMissed);
iowrite32(0,ioaddr+RxMissed);
spin_unlock_irqrestore (&tp->lock, flags);

synchronize_irq (dev->irq);/* racy, but that's ok here */
free_irq (dev->irq, dev);
tp->cur_tx = 0;
tp->dirty_tx = 0;
/* Buffers can only be freed after the IRQ handler is gone. */
pci_free_consistent(tp->pdev, RX_BUF_TOT_LEN,tp->rx_bufs, tp->rx_bufs_dma);
pci_free_consistent(tp->pdev, 1536*4,tp->tx_bufs, tp->tx_bufs_dma);
tp->rx_bufs = NULL;
tp->tx_bufs = NULL;
/* Green! Put the chip in low-power mode. */
iowrite8(Cfg9346_Unlock,ioaddr+Cfg9346);
if (rtl_chip_info[tp->chipset].flags & HasHltClk)
iowrite8('H',ioaddr+HltClk);
return 0;
}


文章来源CU社区:改写drivers/net/8139too.c

分享好友

分享这个小栈给你的朋友们,一起进步吧。

内核源码
创建时间:2020-05-18 13:36:55
内核源码精华帖内容汇总
展开
订阅须知

• 所有用户可根据关注领域订阅专区或所有专区

• 付费订阅:虚拟交易,一经交易不退款;若特殊情况,可3日内客服咨询

• 专区发布评论属默认订阅所评论专区(除付费小栈外)

技术专家

查看更多
  • 飘絮絮絮丶
    专家
戳我,来吐槽~