请教块设备驱动的问题
时间:2010-08-24
来源:互联网
大家好,我的sd卡驱动(未用linux自带的)是用的blk_init_queue,采用__make_request处理bio,然后发送request,建立线程处理request的做法。
以前设置的一次性最大传输长度是PAGE_SIZE,也就是4K,现在希望改成64k提高速度。
linux kernel是2.6.34
我在读写前加入card_queue_map_sg 将bio map到内部申请的bounce_buf中,卡底层读写用bounce_buf,然后读后调用card_queue_bounce_post,写前调用card_queue_bounce_pre,将连续内存bounce_buf的内容再copy回 bio中,这样似乎是绕过了req->buffer,因为本来__make_request传下来应该用req->buffer,但是这个buffer有时并不是连续的,但我认为在底层直接修改bio应该可行。但是实际应该是内容有错,所以运行数据有误,出现乱码打印,但并未提示别的错误。我猜应该是读写内容有错,类似打印提示符时打印出的乱码
在mount前用的是4k及4k以内的单位来读取,所以mount成功,此后用8k来读取数据在跨页时有问题。
[ 4.010000] cardblksd:RRR unknown partition table
[ 4.030000] EXT2-fs (cardblksd): warning: mounting unchecked fs, running e2fsck is recommended
[ 4.040000] VFS: Mounted root (ext2 filesystem) on device 254:0.
[ 4.040000] Freeing init memory: 108K
[ 4.040000] ;B#t}Odh!H0 8:b;ppd3
:>1 $tuy8+c<9-;%$++)-3z1-3 88ab>fi|"0__g1-`TE80 0h004+g8+&fd)-0|:--|_8"-6+ !9++8"0-3
可能我的做法有问题,那么为什么不能跨层直接修改bio?在__make_request中并未走到elv merge,一个request一次。IO scheduler用的是cfq。
所以init_request_from_bio应该只是把bio的page_address直接赋值给req->buffer
我贴一下map_sg,pre,post,和issue_rq函数,类似mmc的做法
/*
* Prepare the sg list(s) to be handed off to the host driver
*/
unsigned int card_queue_map_sg(struct card_queue *cq)
{
unsigned int sg_len;
size_t buflen;
struct scatterlist *sg;
int i;
if (!cq->bounce_buf)
return blk_rq_map_sg(cq->queue, cq->req, cq->sg);
BUG_ON(!cq->bounce_sg);
sg_len = blk_rq_map_sg(cq->queue, cq->req, cq->bounce_sg);
cq->bounce_sg_len = sg_len;
buflen = 0;
for_each_sg(cq->bounce_sg, sg, sg_len, i)
buflen += sg->length;
sg_init_one(cq->sg, cq->bounce_buf, buflen);
return 1;
}
/*
* If writing, bounce the data to the buffer before the request
* is sent to the host driver
*/
void card_queue_bounce_pre(struct card_queue *cq)
{
unsigned long flags;
if (!cq->bounce_buf)
return;
if (rq_data_dir(cq->req) != WRITE)
return;
local_irq_save(flags);
sg_copy_to_buffer(cq->bounce_sg, cq->bounce_sg_len,
cq->bounce_buf, cq->sg[0].length);
local_irq_restore(flags);
}
/*
* If reading, bounce the data from the buffer after the request
* has been handled by the host driver
*/
void card_queue_bounce_post(struct card_queue *cq)
{
unsigned long flags;
if (!cq->bounce_buf)
return;
if (rq_data_dir(cq->req) != READ)
return;
local_irq_save(flags);
sg_copy_from_buffer(cq->bounce_sg, cq->bounce_sg_len,
cq->bounce_buf, cq->sg[0].length);
local_irq_restore(flags);
}
static int card_blk_issue_rq(struct card_queue *cq, struct request *req)
{
struct card_blk_data *card_data = cq->data;
struct memory_card *card = card_data->queue.card;
struct card_blk_request brq;
int ret;
int i;
//printk("card issue request %d sector num: %d\n", req->sector, req->nr_sectors);
if (card_claim_card(card)) {
spin_lock_irq(&card_data->lock);
ret = 1;
while (ret) {
ret = __blk_end_request(req, -EIO, (1 << card_data->block_bits));
}
spin_unlock_irq(&card_data->lock);
return 0;
}
do {
brq.crq.cmd = rq_data_dir(req);
//brq.crq.buf = req->buffer;
if(cq->bounce_buf)
brq.crq.buf = cq->bounce_buf;
else
brq.crq.buf = req->buffer;
brq.card_data.lba = blk_rq_pos(req);
brq.card_data.blk_size = 1 << card_data->block_bits;
brq.card_data.blk_nums = blk_rq_sectors(req);
printk("(%d, %x, %d)\n", brq.card_data.blk_nums, brq.card_data.lba, brq.card_data.blk_size );
brq.card_data.sg = cq->sg;
memset(brq.card_data.sg, 0 , sizeof(brq.card_data.sg));
// brq.card_data.sg_len = blk_rq_map_sg(req->q, req, brq.card_data.sg);
brq.card_data.sg_len = card_queue_map_sg(cq);
card->host->card_type = card->card_type;
card_queue_bounce_pre(cq);
card_wait_for_req(card->host, &brq);
card_queue_bounce_post(cq);
/*
*the request issue failed
*/
if (brq.card_data.error) {
card_release_host(card->host);
spin_lock_irq(&card_data->lock);
ret = 1;
while (ret) {
ret = __blk_end_request(req, -EIO, (1 << card_data->block_bits));
}
spin_unlock_irq(&card_data->lock);
return 0;
}
/*
* A block was successfully transferred.
*/
spin_lock_irq(&card_data->lock);
brq.card_data.bytes_xfered = brq.card_data.blk_size * brq.card_data.blk_nums;
ret = __blk_end_request(req, 0, brq.card_data.bytes_xfered);
spin_unlock_irq(&card_data->lock);
} while (ret);
card_release_host(card->host);
return 1;
}
谢谢大家
以前设置的一次性最大传输长度是PAGE_SIZE,也就是4K,现在希望改成64k提高速度。
linux kernel是2.6.34
我在读写前加入card_queue_map_sg 将bio map到内部申请的bounce_buf中,卡底层读写用bounce_buf,然后读后调用card_queue_bounce_post,写前调用card_queue_bounce_pre,将连续内存bounce_buf的内容再copy回 bio中,这样似乎是绕过了req->buffer,因为本来__make_request传下来应该用req->buffer,但是这个buffer有时并不是连续的,但我认为在底层直接修改bio应该可行。但是实际应该是内容有错,所以运行数据有误,出现乱码打印,但并未提示别的错误。我猜应该是读写内容有错,类似打印提示符时打印出的乱码
在mount前用的是4k及4k以内的单位来读取,所以mount成功,此后用8k来读取数据在跨页时有问题。
[ 4.010000] cardblksd:RRR unknown partition table
[ 4.030000] EXT2-fs (cardblksd): warning: mounting unchecked fs, running e2fsck is recommended
[ 4.040000] VFS: Mounted root (ext2 filesystem) on device 254:0.
[ 4.040000] Freeing init memory: 108K
[ 4.040000] ;B#t}Odh!H0 8:b;ppd3
:>1 $tuy8+c<9-;%$++)-3z1-3 88ab>fi|"0__g1-`TE80 0h004+g8+&fd)-0|:--|_8"-6+ !9++8"0-3
可能我的做法有问题,那么为什么不能跨层直接修改bio?在__make_request中并未走到elv merge,一个request一次。IO scheduler用的是cfq。
所以init_request_from_bio应该只是把bio的page_address直接赋值给req->buffer
我贴一下map_sg,pre,post,和issue_rq函数,类似mmc的做法
/*
* Prepare the sg list(s) to be handed off to the host driver
*/
unsigned int card_queue_map_sg(struct card_queue *cq)
{
unsigned int sg_len;
size_t buflen;
struct scatterlist *sg;
int i;
if (!cq->bounce_buf)
return blk_rq_map_sg(cq->queue, cq->req, cq->sg);
BUG_ON(!cq->bounce_sg);
sg_len = blk_rq_map_sg(cq->queue, cq->req, cq->bounce_sg);
cq->bounce_sg_len = sg_len;
buflen = 0;
for_each_sg(cq->bounce_sg, sg, sg_len, i)
buflen += sg->length;
sg_init_one(cq->sg, cq->bounce_buf, buflen);
return 1;
}
/*
* If writing, bounce the data to the buffer before the request
* is sent to the host driver
*/
void card_queue_bounce_pre(struct card_queue *cq)
{
unsigned long flags;
if (!cq->bounce_buf)
return;
if (rq_data_dir(cq->req) != WRITE)
return;
local_irq_save(flags);
sg_copy_to_buffer(cq->bounce_sg, cq->bounce_sg_len,
cq->bounce_buf, cq->sg[0].length);
local_irq_restore(flags);
}
/*
* If reading, bounce the data from the buffer after the request
* has been handled by the host driver
*/
void card_queue_bounce_post(struct card_queue *cq)
{
unsigned long flags;
if (!cq->bounce_buf)
return;
if (rq_data_dir(cq->req) != READ)
return;
local_irq_save(flags);
sg_copy_from_buffer(cq->bounce_sg, cq->bounce_sg_len,
cq->bounce_buf, cq->sg[0].length);
local_irq_restore(flags);
}
static int card_blk_issue_rq(struct card_queue *cq, struct request *req)
{
struct card_blk_data *card_data = cq->data;
struct memory_card *card = card_data->queue.card;
struct card_blk_request brq;
int ret;
int i;
//printk("card issue request %d sector num: %d\n", req->sector, req->nr_sectors);
if (card_claim_card(card)) {
spin_lock_irq(&card_data->lock);
ret = 1;
while (ret) {
ret = __blk_end_request(req, -EIO, (1 << card_data->block_bits));
}
spin_unlock_irq(&card_data->lock);
return 0;
}
do {
brq.crq.cmd = rq_data_dir(req);
//brq.crq.buf = req->buffer;
if(cq->bounce_buf)
brq.crq.buf = cq->bounce_buf;
else
brq.crq.buf = req->buffer;
brq.card_data.lba = blk_rq_pos(req);
brq.card_data.blk_size = 1 << card_data->block_bits;
brq.card_data.blk_nums = blk_rq_sectors(req);
printk("(%d, %x, %d)\n", brq.card_data.blk_nums, brq.card_data.lba, brq.card_data.blk_size );
brq.card_data.sg = cq->sg;
memset(brq.card_data.sg, 0 , sizeof(brq.card_data.sg));
// brq.card_data.sg_len = blk_rq_map_sg(req->q, req, brq.card_data.sg);
brq.card_data.sg_len = card_queue_map_sg(cq);
card->host->card_type = card->card_type;
card_queue_bounce_pre(cq);
card_wait_for_req(card->host, &brq);
card_queue_bounce_post(cq);
/*
*the request issue failed
*/
if (brq.card_data.error) {
card_release_host(card->host);
spin_lock_irq(&card_data->lock);
ret = 1;
while (ret) {
ret = __blk_end_request(req, -EIO, (1 << card_data->block_bits));
}
spin_unlock_irq(&card_data->lock);
return 0;
}
/*
* A block was successfully transferred.
*/
spin_lock_irq(&card_data->lock);
brq.card_data.bytes_xfered = brq.card_data.blk_size * brq.card_data.blk_nums;
ret = __blk_end_request(req, 0, brq.card_data.bytes_xfered);
spin_unlock_irq(&card_data->lock);
} while (ret);
card_release_host(card->host);
return 1;
}
谢谢大家
作者: fei1700 发布时间: 2010-08-24
一定是我问问题的方法有问题,连打开这个帖子的人都很少
还是我自己说一下这个问题的情况
我如果把bouncesz从8K改成64k,就没这个现象了。搞得我问题都不好查了。
还是我自己说一下这个问题的情况
我如果把bouncesz从8K改成64k,就没这个现象了。搞得我问题都不好查了。
作者: fei1700 发布时间: 2010-08-26
相关阅读 更多
热门阅读
-
office 2019专业增强版最新2021版激活秘钥/序列号/激活码推荐 附激活工具
阅读:74
-
如何安装mysql8.0
阅读:31
-
Word快速设置标题样式步骤详解
阅读:28
-
20+道必知必会的Vue面试题(附答案解析)
阅读:37
-
HTML如何制作表单
阅读:22
-
百词斩可以改天数吗?当然可以,4个步骤轻松修改天数!
阅读:31
-
ET文件格式和XLS格式文件之间如何转化?
阅读:24
-
react和vue的区别及优缺点是什么
阅读:121
-
支付宝人脸识别如何关闭?
阅读:21
-
腾讯微云怎么修改照片或视频备份路径?
阅读:28