分享好友

×
取消 复制
看块设备驱动部分的笔记(4)
2020-06-28 16:03:36

request_queue

相关数据结构:

请求队列中两个比较重要的数据结构是: request_queue 和 request

/*
 * A block device request queue (2.6-era Linux block layer).
 * Owns the list of pending requests, the driver-supplied callbacks that
 * service/merge/prepare them, the queue limits the driver advertised,
 * and the plugging/flush machinery.
 */
struct request_queue
{
/*
 * Together with queue_head for cacheline sharing
 */
struct list_head queue_head; /* head of the doubly-linked list of pending requests */
struct request *last_merge; /* cached hint: last request considered for a bio merge */
elevator_t *elevator; /* I/O scheduler ("elevator") attached to this queue */

/*
 * the queue request freelist, one for reads and one for writes
 */
struct request_list rq;

/* Block-layer / driver callback table. */
request_fn_proc *request_fn; /* driver strategy routine: services queued requests */
merge_request_fn *back_merge_fn; /* try to append a bio to the tail of a request */
merge_request_fn *front_merge_fn; /* try to prepend a bio to the head of a request */
merge_requests_fn *merge_requests_fn; /* try to coalesce two adjacent requests */
make_request_fn *make_request_fn; /* entry point that feeds a bio into the queue */
prep_rq_fn *prep_rq_fn; /* prepare a request before handing it to the driver */
unplug_fn *unplug_fn; /* "unplug": start servicing the queued requests */
merge_bvec_fn *merge_bvec_fn; /* ask how many bytes may still be added to a bio */
activity_fn *activity_fn; /* NOTE(review): activity notification hook; exact semantics not visible here */
issue_flush_fn *issue_flush_fn; /* issue a device cache flush */
prepare_flush_fn *prepare_flush_fn; /* set up a flush request */
end_flush_fn *end_flush_fn; /* completion hook for a flush request */

/*
 * Auto-unplugging state
 */
struct timer_list unplug_timer; /* fires to unplug the queue after a delay */
int unplug_thresh; /* After this many requests */
unsigned long unplug_delay; /* After this many jiffies */
struct work_struct unplug_work; /* deferred work that performs the unplug */

struct backing_dev_info backing_dev_info; /* writeback/congestion state for this device */

/*
 * The queue owner gets to use this for whatever they like.
 * ll_rw_blk doesn't touch it.
 */
void *queuedata;

void *activity_data; /* opaque argument handed to activity_fn */

/*
 * queue needs bounce pages for pages above this limit
 */
unsigned long bounce_pfn;
unsigned int bounce_gfp; /* allocation flags used when allocating bounce pages */

/*
 * various queue flags, see QUEUE_* below
 */
unsigned long queue_flags;

/*
 * protects queue structures from reentrancy. ->__queue_lock should
 * _never_ be used directly, it is queue private. always use
 * ->queue_lock.
 */
spinlock_t __queue_lock; /* internal lock storage; may be pointed to by queue_lock */
spinlock_t *queue_lock; /* the lock everyone must take (may alias a driver lock) */

/*
 * queue kobject
 */
struct kobject kobj; /* sysfs representation of this queue */

/*
 * queue settings
 */
unsigned long nr_requests; /* Max # of requests */
unsigned int nr_congestion_on; /* queue counts as congested at/above this many requests */
unsigned int nr_congestion_off; /* ...and uncongested again below this */
unsigned int nr_batching; /* requests a "batching" task may allocate past the limit */

unsigned short max_sectors; /* max sectors per request (soft limit) */
unsigned short max_hw_sectors; /* max sectors the hardware can transfer */
unsigned short max_phys_segments; /* max physical scatter-gather segments per request */
unsigned short max_hw_segments; /* max segments after DMA-remapping coalescing */
unsigned short hardsect_size; /* device hardware sector size, in bytes */
unsigned int max_segment_size; /* max size of one scatter-gather segment */

unsigned long seg_boundary_mask; /* a segment may not cross this address boundary */
unsigned int dma_alignment; /* alignment required of DMA buffer addresses */

struct blk_queue_tag *queue_tags; /* tagged-command-queueing state, if TCQ is used */

atomic_t refcnt; /* reference count on this queue */

unsigned int in_flight; /* requests issued to the driver but not yet completed */

/*
 * sg stuff
 */
unsigned int sg_timeout; /* timeout for SG_IO pass-through commands */
unsigned int sg_reserved_size; /* reserved buffer size for SG_IO */

struct list_head drain_list; /* NOTE(review): queue-drain machinery; semantics not visible here */

/*
 * reserved for flush operations
 */
struct request *flush_rq; /* pre-allocated request used for flushes */
unsigned char ordered; /* ordered (barrier) mode this queue supports */
};



------------------------------------------------------------------

/*
* try to put the fields that are referenced together in the same cacheline
*/
/*
 * One block I/O request (2.6-era Linux block layer). A request carries a
 * chain of one or more bios plus the bookkeeping needed to submit it to
 * the driver piece by piece and to complete it piece by piece.
 *
 * try to put the fields that are referenced together in the same cacheline
 */
struct request {
struct list_head queuelist; /* looking for ->queue? you must _not_
 * access it directly, use
 * blkdev_dequeue_request! */
unsigned long flags; /* see REQ_ bits below */

/* Maintain bio traversal state for part by part I/O submission.
 * hard_* are block layer internals, no driver should touch them!
 */

sector_t sector; /* next sector to submit */
unsigned long nr_sectors; /* no. of sectors left to submit */
/* no. of sectors left to submit in the current segment */
unsigned int current_nr_sectors;

sector_t hard_sector; /* next sector to complete */
unsigned long hard_nr_sectors; /* no. of sectors left to complete */
/* no. of sectors left to complete in the current segment */
unsigned int hard_cur_sectors;

struct bio *bio; /* first bio in the request's bio chain */
struct bio *biotail; /* last bio in the chain (for fast appends) */

void *elevator_private; /* per-request state owned by the I/O scheduler */

int rq_status; /* should split this into a few status bits */
struct gendisk *rq_disk; /* disk this request is destined for */
int errors; /* error count reported during completion */
unsigned long start_time; /* when the request entered the queue (jiffies) */

/* Number of scatter-gather DMA addr+len pairs after
 * physical address coalescing is performed.
 */
unsigned short nr_phys_segments;

/* Number of scatter-gather addr+len pairs after
 * physical and DMA remapping hardware coalescing is performed.
 * This is the number of scatter-gather entries the driver
 * will actually have to deal with after DMA mapping is done.
 */
unsigned short nr_hw_segments;

int tag; /* TCQ tag, when tagged queueing is in use */
char *buffer; /* kernel-virtual address of the current data segment */

int ref_count; /* reference count on this request */
request_queue_t *q; /* queue this request belongs to */
struct request_list *rl; /* freelist this request was allocated from */

struct completion *waiting; /* completion to signal for synchronous waiters */
void *special; /* driver-private data for non-fs (special) requests */

/*
 * when request is used as a packet command carrier
 */
unsigned int cmd_len; /* length of the command in cmd[] */
unsigned char cmd[BLK_MAX_CDB]; /* the packet command itself (e.g. a SCSI CDB) */

unsigned int data_len; /* length of the data buffer below */
void *data; /* data buffer for the packet command */

unsigned int sense_len; /* length of the sense buffer below */
void *sense; /* sense data returned by the device */

unsigned int timeout; /* command timeout */

/*
 * For Power Management requests
 */
struct request_pm_state *pm;

/*
 * completion callback. end_io_data should be folded in with waiting
 */
rq_end_io_fn *end_io; /* called when the request completes */
void *end_io_data; /* opaque argument for end_io */
};

对块设备的每一个请求都用一个 request 描述符来表示,
而每个请求又包含一个或多个 bio 结构.

对于一个请求队列(request_queue),其所有的request描述符形成一个双向链表.


文章来源CU社区:看块设备驱动部分的笔记


分享好友

分享这个小栈给你的朋友们,一起进步吧。

内核源码
创建时间:2020-05-18 13:36:55
内核源码精华帖内容汇总
展开
订阅须知

• 所有用户可根据关注领域订阅专区或所有专区

• 付费订阅:虚拟交易,一经交易不退款;若特殊情况,可3日内客服咨询

• 专区发布评论属默认订阅所评论专区(除付费小栈外)

技术专家

查看更多
  • 飘絮絮絮丶
    专家
戳我,来吐槽~