Tracing and Analyzing TCP in the Kernel - 13 - The TCP (IPv4) socket connection, continued (5)
2020-05-25 15:01:35

Today we continue with dev_queue_xmit(). As we said yesterday, this is the function that sends out the skb we have already prepared. Let's look at its code.
int dev_queue_xmit(struct sk_buff *skb)
{
    struct net_device *dev = skb->dev;
    struct Qdisc *q;
    int rc = -ENOMEM;

    /* GSO will handle the following emulations directly. */
    if (netif_needs_gso(dev, skb))
        goto gso;

    if (skb_shinfo(skb)->frag_list &&
        !(dev->features & NETIF_F_FRAGLIST) &&
        __skb_linearize(skb))
        goto out_kfree_skb;

    /* Fragmented skb is linearized if device does not support SG,
     * or if at least one of fragments is in highmem and device
     * does not support DMA from it.
     */
    if (skb_shinfo(skb)->nr_frags &&
        (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
        __skb_linearize(skb))
        goto out_kfree_skb;

    /* If packet is not checksummed and device does not support
     * checksumming for this protocol, complete checksumming here.
     */
    if (skb->ip_summed == CHECKSUM_PARTIAL) {
        skb_set_transport_header(skb, skb->csum_start -
                                      skb_headroom(skb));
        if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
            goto out_kfree_skb;
    }
Let's take it in pieces. This function sits at around line 1637 of net/core/dev.c. It first grabs the struct net_device that represents the NIC; that structure is very large and we have already met it in previous sections, so we won't describe it again here. The code then touches GSO, which stands for Generic Segmentation Offload. GSO is closely tied to the NIC hardware, and there are two related mechanisms, TSO and UFO, which we will analyze when we actually run into them. Here the code steps into netif_needs_gso():
static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
    return skb_is_gso(skb) &&
           (!skb_gso_ok(skb, dev->features) ||
            unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
This checks whether our packet is a GSO packet that the device cannot segment (or checksum) on its own; if so, dev_queue_xmit() jumps straight to the gso: label further down. The rest of this first segment linearizes fragmented skbs when the device lacks scatter/gather support (or cannot DMA from highmem), and completes the checksum in software when the hardware cannot do it for this protocol. Having read the earlier sections, readers should find these functions easy to follow, especially since the English comments are quite detailed. Before we move on, a quick look at the two small helpers netif_needs_gso() relies on.
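In this kernel generation they are roughly the following (a sketch based on include/linux/skbuff.h and include/linux/netdevice.h; treat the exact form as approximate):

/* Rough sketch of the GSO helpers in this kernel generation. */
static inline int skb_is_gso(const struct sk_buff *skb)
{
    return skb_shinfo(skb)->gso_size;    /* a non-zero gso_size marks a GSO skb */
}

static inline int net_gso_ok(int features, int gso_type)
{
    int feature = gso_type << NETIF_F_GSO_SHIFT;
    return (features & feature) == feature;    /* device advertises this GSO type */
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
    return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}

So the jump is taken only when the skb carries GSO data that this particular device cannot handle in hardware. Now back to dev_queue_xmit(), continuing from the gso: label: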
gso:
    spin_lock_prefetch(&dev->queue_lock);

    /* Disable soft irqs for various locks below. Also
     * stops preemption for RCU.
     */
    rcu_read_lock_bh();

    /* Updates of qdisc are serialized by queue_lock.
     * The struct Qdisc which is pointed to by qdisc is now a
     * rcu structure - it may be accessed without acquiring
     * a lock (but the structure may be stale.) The freeing of the
     * qdisc will be deferred until it's known that there are no
     * more references to it.
     *
     * If the qdisc has an enqueue function, we still need to
     * hold the queue_lock before calling it, since queue_lock
     * also serializes access to the device queue.
     */
    q = rcu_dereference(dev->qdisc);
#ifdef CONFIG_NET_CLS_ACT
    skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
    if (q->enqueue) {
        /* Grab device queue */
        spin_lock(&dev->queue_lock);
        q = dev->qdisc;
        if (q->enqueue) {
            /* reset queue_mapping to zero */
            skb_set_queue_mapping(skb, 0);
            rc = q->enqueue(skb, q);
            qdisc_run(dev);
            spin_unlock(&dev->queue_lock);

            rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
            goto out;
        }
        spin_unlock(&dev->queue_lock);
    }

    /* The device has no queue. Common case for software devices:
       loopback, all the sorts of tunnels...

       Really, it is unlikely that netif_tx_lock protection is necessary
       here. (f.e. loopback and IP tunnels are clean ignoring statistics
       counters.)
       However, it is possible, that they rely on protection
       made by us here.

       Check this and shot the lock. It is not prone from deadlocks.
       Either shot noqueue qdisc, it is even simpler 8)
     */
    if (dev->flags & IFF_UP) {
        int cpu = smp_processor_id(); /* ok because BHs are off */

        if (dev->xmit_lock_owner != cpu) {

            HARD_TX_LOCK(dev, cpu);

            if (!netif_queue_stopped(dev) &&
                !netif_subqueue_stopped(dev, skb)) {
                rc = 0;
                if (!dev_hard_start_xmit(skb, dev)) {
                    HARD_TX_UNLOCK(dev);
                    goto out;
                }
            }
            HARD_TX_UNLOCK(dev);
            if (net_ratelimit())
                printk(KERN_CRIT "Virtual device %s asks to "
                       "queue packet!\n", dev->name);
        } else {
            /* Recursion is detected! It is possible,
             * unfortunately */
            if (net_ratelimit())
                printk(KERN_CRIT "Dead loop on virtual device "
                       "%s, fix it urgently!\n", dev->name);
        }
    }

    rc = -ENETDOWN;
    rcu_read_unlock_bh();

out_kfree_skb:
    kfree_skb(skb);
    return rc;
out:
    rcu_read_unlock_bh();
    return rc;
}
At the top of the function we saw the struct Qdisc pointer q. This structure describes the queueing discipline (qdisc) attached to the device. dev_queue_xmit() manages and sends packets through that queue, so the structure plays a central role here.
struct Qdisc
{
    int                 (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
    struct sk_buff     *(*dequeue)(struct Qdisc *dev);
    unsigned            flags;
#define TCQ_F_BUILTIN   1
#define TCQ_F_THROTTLED 2
#define TCQ_F_INGRESS   4
    int                 padded;
    struct Qdisc_ops   *ops;
    u32                 handle;
    u32                 parent;
    atomic_t            refcnt;
    struct sk_buff_head q;
    struct net_device  *dev;
    struct list_head    list;

    struct gnet_stats_basic     bstats;
    struct gnet_stats_queue     qstats;
    struct gnet_stats_rate_est  rate_est;
    spinlock_t         *stats_lock;
    struct rcu_head     q_rcu;
    int                 (*reshape_fail)(struct sk_buff *skb,
                                        struct Qdisc *q);

    /* This field is deprecated, but it is still used by CBQ
     * and it will live until better solution will be invented.
     */
    struct Qdisc       *__parent;
};
The enqueue function pointer in this structure inserts a packet into the queue, and dequeue takes one off. Looking back at the function code (skipping the locking for now): dev_queue_xmit() first fetches the discipline the device is using with q = rcu_dereference(dev->qdisc), then checks whether that discipline defines an enqueue operation. If it does, rc = q->enqueue(skb, q) is executed, i.e. the enqueue operation of the discipline that was installed for this device when the driver registered it with the kernel. That registration happens in the NIC driver. Take the cs8900 and DM9000 NICs as examples: their drivers live in drivers/net/cs89x0.c and drivers/net/dm9000.c, and their initialization (probe) functions are cs89x0_probe() and dm9000_probe(). Both end up calling register_netdev() to register the device they have just set up with the kernel; register_netdev() calls register_netdevice(), which in turn calls dev_init_scheduler(). A hypothetical sketch of such a probe path is shown just below, and after it we look at dev_init_scheduler() itself.
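To make the registration path concrete, here is a minimal, hypothetical probe sketch. example_probe, example_priv and example_start_xmit are made-up names, not the real cs89x0/dm9000 code; the sketch only illustrates the shape of the path those two drivers take:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Hypothetical private data and TX routine -- illustration only. */
struct example_priv {
    int dummy;
};

static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    dev_kfree_skb(skb);             /* a real driver would hand the skb to hardware */
    return 0;
}

static int example_probe(void)
{
    struct net_device *dev = alloc_etherdev(sizeof(struct example_priv));
    int err;

    if (!dev)
        return -ENOMEM;

    /* In this kernel generation hard_start_xmit still lives directly in
     * struct net_device, just as cs89x0 and dm9000 set it in their probes. */
    dev->hard_start_xmit = example_start_xmit;

    /* register_netdev() -> register_netdevice() -> dev_init_scheduler() */
    err = register_netdev(dev);
    if (err)
        free_netdev(dev);
    return err;
}

dev_init_scheduler() then installs the device's initial queueing discipline: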
void dev_init_scheduler(struct net_device *dev)
{
    qdisc_lock_tree(dev);
    dev->qdisc = &noop_qdisc;
    dev->qdisc_sleeping = &noop_qdisc;
    INIT_LIST_HEAD(&dev->qdisc_list);
    qdisc_unlock_tree(dev);

    setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}
In this function the queueing discipline of the cs8900 and dm9000 devices is set to noop_qdisc:
struct Qdisc noop_qdisc = {
    .enqueue = noop_enqueue,
    .dequeue = noop_dequeue,
    .flags   = TCQ_F_BUILTIN,
    .ops     = &noop_qdisc_ops,
    .list    = LIST_HEAD_INIT(noop_qdisc.list),
};
We can see this discipline uses noop_enqueue and noop_dequeue:
static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
    kfree_skb(skb);
    return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
    return NULL;
}
So "enqueueing" here simply frees the packet, and dequeueing returns nothing. Clearly this discipline is only a temporary placeholder. When is the real queueing discipline installed, then? To bring a NIC into operation it is not enough to configure an IP address with ifconfig; you also have to run "ifconfig eth0 up". ifconfig does this through the socket ioctl() interface, which leads into the following path:
ioctl() -> sock_ioctl() -> dev_ioctl() (SIOCSIFFLAGS) -> dev_ifsioc() -> dev_change_flags() (IFF_UP) -> dev_open()
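For orientation, what "ifconfig eth0 up" boils down to in user space is roughly the following. This is a hedged sketch, not ifconfig itself; bring_up is a made-up helper and error handling is trimmed:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

/* Hypothetical helper: set IFF_UP on an interface, the way ifconfig does. */
static int bring_up(const char *name)
{
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0)
        return -1;

    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

    if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0) {          /* read current flags */
        ifr.ifr_flags |= IFF_UP;
        if (ioctl(fd, SIOCSIFFLAGS, &ifr) == 0) {      /* kernel: dev_change_flags() -> dev_open() */
            close(fd);
            return 0;
        }
    }
    close(fd);
    return -1;
}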
At the end of that path we arrive in the kernel's dev_open():
int dev_open(struct net_device *dev)
{
    ......
    dev_activate(dev);
    ......
}
dev_open() calls dev_activate(), which re-installs the device's queueing discipline:
void dev_activate(struct net_device *dev)
{
    /* No queueing discipline is attached to device;
       create default one i.e. pfifo_fast for devices,
       which need queueing and noqueue_qdisc for
       virtual interfaces
     */

    if (dev->qdisc_sleeping == &noop_qdisc) {
        struct Qdisc *qdisc;
        if (dev->tx_queue_len) {
            qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops,
                                      TC_H_ROOT);
            if (qdisc == NULL) {
                printk(KERN_INFO "%s: activation failed\n", dev->name);
                return;
            }
            list_add_tail(&qdisc->list, &dev->qdisc_list);
        } else {
            qdisc = &noqueue_qdisc;
        }
        dev->qdisc_sleeping = qdisc;
    }

    if (!netif_carrier_ok(dev))
        /* Delay activation until next carrier-on event */
        return;

    spin_lock_bh(&dev->queue_lock);
    rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
    if (dev->qdisc != &noqueue_qdisc) {
        dev->trans_start = jiffies;
        dev_watchdog_up(dev);
    }
    spin_unlock_bh(&dev->queue_lock);
}
I am Wumingxiaozu; please credit the source when reposting: http://qinjiana0786.cublog.cn

In the code above, qdisc_create_dflt() builds a new queueing discipline for the NIC, which is then stored in the device structure dev; this is where the real discipline of the cs8900 and dm9000 devices gets established. Note that the new discipline is first kept in qdisc_sleeping: that field holds the rule to be used while the device is active, and rcu_assign_pointer() copies it into dev->qdisc once the carrier is up. Two cases are worth keeping apart. While the device is down (or not yet activated), dev->qdisc still points at noop_qdisc, whose enqueue simply frees every packet, as we saw above. Devices whose tx_queue_len is 0 (virtual interfaces such as loopback and tunnels) get noqueue_qdisc instead; that discipline has no enqueue function at all, so dev_queue_xmit() bypasses the queue and calls the driver directly, which is exactly the "device has no queue" branch we saw earlier. A rough sketch of noqueue_qdisc follows, and after that we look at how the default rule is allocated.
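From memory of this kernel generation, noqueue_qdisc is defined in net/sched/sch_generic.c roughly as follows; treat the exact fields as approximate, the point being that .enqueue is NULL:

/* Rough sketch of the definition in net/sched/sch_generic.c. */
static struct Qdisc noqueue_qdisc = {
    .enqueue = NULL,               /* no enqueue: dev_queue_xmit() sends directly */
    .dequeue = noop_dequeue,
    .flags   = TCQ_F_BUILTIN,
    .ops     = &noqueue_qdisc_ops,
    .list    = LIST_HEAD_INIT(noqueue_qdisc.list),
};

Now let's look at how the default rule is allocated: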
struct Qdisc *qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
                                unsigned int parentid)
{
    struct Qdisc *sch;

    sch = qdisc_alloc(dev, ops);
    if (IS_ERR(sch))
        goto errout;
    sch->stats_lock = &dev->queue_lock;
    sch->parent = parentid;

    if (!ops->init || ops->init(sch, NULL) == 0)
        return sch;

    qdisc_destroy(sch);
errout:
    return NULL;
}
When dev_activate() calls this function, the struct Qdisc_ops pointer passed in is &pfifo_fast_ops. struct Qdisc_ops is the structure describing a queueing discipline's operations:
struct Qdisc_ops
{
    struct Qdisc_ops            *next;
    const struct Qdisc_class_ops *cl_ops;
    char                        id[IFNAMSIZ];
    int                         priv_size;

    int                         (*enqueue)(struct sk_buff *, struct Qdisc *);
    struct sk_buff             *(*dequeue)(struct Qdisc *);
    int                         (*requeue)(struct sk_buff *, struct Qdisc *);
    unsigned int                (*drop)(struct Qdisc *);

    int                         (*init)(struct Qdisc *, struct nlattr *arg);
    void                        (*reset)(struct Qdisc *);
    void                        (*destroy)(struct Qdisc *);
    int                         (*change)(struct Qdisc *, struct nlattr *arg);

    int                         (*dump)(struct Qdisc *, struct sk_buff *);
    int                         (*dump_stats)(struct Qdisc *, struct gnet_dump *);

    struct module               *owner;
};
Most of its members are function pointers; we won't go through them one by one. Here is the pfifo_fast_ops variable that was passed down:
static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
    .id        = "pfifo_fast",
    .priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
    .enqueue   = pfifo_fast_enqueue,
    .dequeue   = pfifo_fast_dequeue,
    .requeue   = pfifo_fast_requeue,
    .init      = pfifo_fast_init,
    .reset     = pfifo_fast_reset,
    .dump      = pfifo_fast_dump,
    .owner     = THIS_MODULE,
};
This is the operations set of a first-in, first-out queue. Inside qdisc_create_dflt() we saw the call to qdisc_alloc():
struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
{
    void *p;
    struct Qdisc *sch;
    unsigned int size;
    int err = -ENOBUFS;

    /* ensure that the Qdisc and the private data are 32-byte aligned */
    size = QDISC_ALIGN(sizeof(*sch));
    size += ops->priv_size + (QDISC_ALIGNTO - 1);

    p = kzalloc(size, GFP_KERNEL);
    if (!p)
        goto errout;
    sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
    sch->padded = (char *) sch - (char *) p;

    INIT_LIST_HEAD(&sch->list);
    skb_queue_head_init(&sch->q);
    sch->ops = ops;
    sch->enqueue = ops->enqueue;
    sch->dequeue = ops->dequeue;
    sch->dev = dev;
    dev_hold(dev);
    atomic_set(&sch->refcnt, 1);

    return sch;
errout:
    return ERR_PTR(err);
}
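The alignment helpers used above are, if memory serves, simple macros defined near qdisc_alloc() in the scheduler code; a sketch (location and exact form approximate):

/* Rough sketch: round a length up to the next 32-byte boundary so the Qdisc
 * and its private data both start 32-byte aligned. */
#define QDISC_ALIGNTO       32
#define QDISC_ALIGN(len)    (((len) + QDISC_ALIGNTO - 1) & ~(QDISC_ALIGNTO - 1))

For instance, QDISC_ALIGN(140) evaluates to 160, and for pfifo_fast the priv_size adds room for its three sk_buff_head band queues right after the aligned Qdisc.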
Clearly, the enqueue and dequeue pointers of our queueing discipline end up pointing at the FIFO operations pfifo_fast_enqueue and pfifo_fast_dequeue. The rest of the function is straightforward, so we skip it. Back in qdisc_create_dflt(), ops->init is then called, i.e. pfifo_fast_init(), which initializes the discipline's three band queues:
static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
    int prio;
    struct sk_buff_head *list = qdisc_priv(qdisc);

    for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
        skb_queue_head_init(list + prio);

    return 0;
}
So once the NIC has been activated, the discipline it uses is the FIFO one specified by the operations structure above. You may have heard of the QoS programming interfaces: they work by calling dev_graft_qdisc() to replace the device's discipline and queue operations with user-defined ones; we won't look at that function here. Returning to dev_queue_xmit(): as discussed earlier, sending a packet means invoking the discipline's enqueue pointer, so rc = q->enqueue(skb, q), given the FIFO assignments above, lands in pfifo_fast_enqueue():
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
    struct sk_buff_head *list = prio2list(skb, qdisc);

    if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
        qdisc->q.qlen++;
        return __qdisc_enqueue_tail(skb, qdisc, list);
    }

    return qdisc_drop(skb, qdisc);
}
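For reference, the matching dequeue side (not shown in the original walkthrough) is roughly the following sketch from this kernel generation; it drains the highest-priority non-empty band first:

/* Rough sketch: pull from the highest-priority non-empty band. */
static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
    int prio;
    struct sk_buff_head *list = qdisc_priv(qdisc);

    for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
        if (!skb_queue_empty(list + prio)) {
            qdisc->q.qlen--;
            return __qdisc_dequeue_head(qdisc, list + prio);
        }
    }

    return NULL;
}

The dequeue side is driven by qdisc_run(dev), which dev_queue_xmit() calls immediately after the enqueue: while the device queue is not stopped it keeps pulling packets off the qdisc and handing them to dev_hard_start_xmit().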
pfifo_fast_enqueue() itself simply picks the band list matching the packet's priority (via prio2list()) and appends the packet to its tail, dropping it once the device's tx_queue_len has been reached; we won't examine it more closely. Continuing with dev_queue_xmit(), we see:
    if (dev->flags & IFF_UP) {
        int cpu = smp_processor_id(); /* ok because BHs are off */

        if (dev->xmit_lock_owner != cpu) {

            HARD_TX_LOCK(dev, cpu);

            if (!netif_queue_stopped(dev) &&
                !netif_subqueue_stopped(dev, skb)) {
                rc = 0;
                if (!dev_hard_start_xmit(skb, dev)) {
                    HARD_TX_UNLOCK(dev);
                    goto out;
                }
            }
            HARD_TX_UNLOCK(dev);
            if (net_ratelimit())
                printk(KERN_CRIT "Virtual device %s asks to "
                       "queue packet!\n", dev->name);
        } else {
            /* Recursion is detected! It is possible,
             * unfortunately */
            if (net_ratelimit())
                printk(KERN_CRIT "Dead loop on virtual device "
                       "%s, fix it urgently!\n", dev->name);
        }
    }
If the device is up (IFF_UP) and its flags show that the queue is not stopped, the packet is handed to dev_hard_start_xmit() for transmission. Note that HARD_TX_LOCK() takes the device's transmit lock first unless the driver declares NETIF_F_LLTX (lock-free transmit), so two CPUs cannot drive the same NIC at once:
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    if (likely(!skb->next)) {
        if (!list_empty(&ptype_all))
            dev_queue_xmit_nit(skb, dev);

        if (netif_needs_gso(dev, skb)) {
            if (unlikely(dev_gso_segment(skb)))
                goto out_kfree_skb;
            if (skb->next)
                goto gso;
        }

        return dev->hard_start_xmit(skb, dev);
    }

gso:
    do {
        struct sk_buff *nskb = skb->next;
        int rc;

        skb->next = nskb->next;
        nskb->next = NULL;
        rc = dev->hard_start_xmit(nskb, dev);
        if (unlikely(rc)) {
            nskb->next = skb->next;
            skb->next = nskb;
            return rc;
        }
        if (unlikely((netif_queue_stopped(dev) ||
                      netif_subqueue_stopped(dev, skb)) &&
                     skb->next))
            return NETDEV_TX_BUSY;
    } while (skb->next);

    skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
    kfree_skb(skb);
    return 0;
}
Again we focus on the actual transmission: the key step is the call to the device's dev->hard_start_xmit(), which pushes the packet (or each GSO segment) out through the driver. The dev_queue_xmit_nit() call before it hands a copy of the outgoing packet to any taps registered on ptype_all, for example an AF_PACKET socket used by a sniffer. For our cs8900 and dm9000 examples, the drivers set this hook in their initialization code: cs8900 with dev->hard_start_xmit = net_send_packet; and dm9000 with ndev->hard_start_xmit = &dm9000_start_xmit;
Let's look at the transmit functions of these two drivers, starting with cs8900:
static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
{
    struct net_local *lp = netdev_priv(dev);

    if (net_debug > 3) {
        printk("%s: sent %d byte packet of type %x\n",
               dev->name, skb->len,
               (skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]);
    }

    /* keep the upload from being interrupted, since we
       ask the chip to start transmitting before the
       whole packet has been completely uploaded. */

    spin_lock_irq(&lp->lock);
    netif_stop_queue(dev);

    /* initiate a transmit sequence */
    writeword(dev->base_addr, TX_CMD_PORT, lp->send_cmd);
    writeword(dev->base_addr, TX_LEN_PORT, skb->len);

    /* Test to see if the chip has allocated memory for the packet */
    if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) {
        /*
         * Gasp! It hasn't. But that shouldn't happen since
         * we're waiting for TxOk, so return 1 and requeue this packet.
         */

        spin_unlock_irq(&lp->lock);
        if (net_debug) printk("cs89x0: Tx buffer not free!\n");
        return 1;
    }

    /* Write the contents of the packet */
    writewords(dev->base_addr, TX_FRAME_PORT, skb->data, (skb->len+1) >> 1);

    spin_unlock_irq(&lp->lock);
    lp->stats.tx_bytes += skb->len;
    dev->trans_start = jiffies;
    dev_kfree_skb(skb);

    /*
     * We DO NOT call netif_wake_queue() here.
     * We also DO NOT call netif_start_queue().
     *
     * Either of these would cause another bottom half run through
     * net_send_packet() before this packet has fully gone out. That causes
     * us to hit the "Gasp!" above and the send is rescheduled. it runs like
     * a dog. We just return and wait for the Tx completion interrupt handler
     * to restart the netdevice layer
     */

    return 0;
}
Apart from the hardware-specific register operations, the key line above is writewords(dev->base_addr, TX_FRAME_PORT, skb->data, (skb->len+1) >> 1);, which hands the data to the NIC to be transmitted. Once the data has been written, the driver releases the packet with dev_kfree_skb(). Now the dm9000 version:
static int
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    unsigned long flags;
    board_info_t *db = (board_info_t *) dev->priv;

    dm9000_dbg(db, 3, "%s:\n", __func__);

    if (db->tx_pkt_cnt > 1)
        return 1;

    spin_lock_irqsave(&db->lock, flags);

    /* Move data to DM9000 TX RAM */
    writeb(DM9000_MWCMD, db->io_addr);

    (db->outblk)(db->io_data, skb->data, skb->len);
    dev->stats.tx_bytes += skb->len;

    db->tx_pkt_cnt++;
    /* TX control: First packet immediately send, second packet queue */
    if (db->tx_pkt_cnt == 1) {
        /* Set TX length to DM9000 */
        iow(db, DM9000_TXPLL, skb->len);
        iow(db, DM9000_TXPLH, skb->len >> 8);

        /* Issue TX polling command */
        iow(db, DM9000_TCR, TCR_TXREQ);    /* Cleared after TX complete */

        dev->trans_start = jiffies;        /* save the time stamp */
    } else {
        /* Second packet */
        db->queue_pkt_len = skb->len;
        netif_stop_queue(dev);
    }

    spin_unlock_irqrestore(&db->lock, flags);

    /* free this SKB */
    dev_kfree_skb(skb);

    return 0;
}
The two NICs transmit differently, but in the end both just hand the packet over to the hardware. In the dm9000 routine we see its registers and on-chip TX RAM being programmed; the details vary with the board layout, which is exactly why drivers need porting. The key transfer statement is (db->outblk)(db->io_data, skb->data, skb->len);, which calls an output routine selected elsewhere in the driver according to the configured I/O width, for example:
static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
{
    writesl(reg, data, (count+3) >> 2);
}
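Conceptually, writesl() just pushes a run of 32-bit words to the same device register. A hypothetical sketch follows; example_writesl is an illustration only, not the real architecture implementation, which lives in the arch-specific I/O headers:

#include <linux/types.h>
#include <linux/io.h>

/* Hypothetical illustration of what writesl() boils down to: write 'count'
 * 32-bit words from 'data' to one and the same memory-mapped register,
 * one __raw_writel() at a time. */
static void example_writesl(void __iomem *reg, const u32 *data, int count)
{
    while (count--)
        __raw_writel(*data++, reg);
}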
Both writewords() and writesl() ultimately come down to low-level I/O writes (such as __raw_writel()) that push the data out to the hardware's I/O addresses. At this point our connection-request packet has been delivered to the peer's NIC. I am Wumingxiaozu; this analysis was done independently, and given its size mistakes are unavoidable, so corrections from readers are very welcome. Thank you!


Article source, CU community: Tracing and Analyzing TCP in the Kernel - 13 - The TCP (IPv4) socket connection, continued (5)

