author     | Nick Piggin <npiggin@suse.de>      | 2010-05-26 15:41:14 +0200
committer  | Jens Axboe <jens.axboe@oracle.com> | 2010-06-01 11:09:20 +0200
commit     | b7c335713ea130d707c22d7f7c57a8eca75ded7e (patch)
tree       | 302213166522d0af8bcb01417b69cd7bd445cb49 /drivers/block
parent     | 0e3c9a2284f5417f196e327c254d0b84c9ee8929 (diff)
download   | linux-b7c335713ea130d707c22d7f7c57a8eca75ded7e.tar.gz
brd: support discard
Support discard requests in brd by zeroing or deleting the underlying backing
pages. This is mainly to help with testing and to preserve the role of the brd
code as reference/documentation code.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
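
As a usage note (not part of the original patch), the discard path added below
can be exercised from userspace with the BLKDISCARD ioctl once the brd module
is loaded. The following is a minimal sketch, assuming a /dev/ram0 device node
and a 1 MiB range starting at offset 0; the range should be aligned to the
advertised discard granularity (PAGE_SIZE) for whole backing pages to be hit.

```c
/* Illustrative only; not part of the patch. Assumes brd is loaded and
 * /dev/ram0 exists. Sends a 1 MiB discard starting at byte offset 0. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* BLKDISCARD */

int main(void)
{
	uint64_t range[2] = { 0, 1024 * 1024 };	/* byte offset, byte length */
	int fd = open("/dev/ram0", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/ram0");
		return 1;
	}
	/* The block layer turns this into discard bios, which brd now
	 * handles in discard_from_brd() (added below). */
	if (ioctl(fd, BLKDISCARD, &range) < 0)
		perror("BLKDISCARD");
	close(fd);
	return 0;
}
```

With the patch applied, reading the discarded range back should return zeroes:
discard_from_brd() deliberately zeroes rather than frees the backing pages, to
avoid writeback deadlocks when the pages would have to be re-allocated under
heavy load.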
Diffstat (limited to 'drivers/block')
-rw-r--r-- | drivers/block/brd.c | 53
1 file changed, 52 insertions(+), 1 deletion(-)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 6081e81d5738..f1bf79d9bc0a 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -133,6 +133,28 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
 	return page;
 }
 
+static void brd_free_page(struct brd_device *brd, sector_t sector)
+{
+	struct page *page;
+	pgoff_t idx;
+
+	spin_lock(&brd->brd_lock);
+	idx = sector >> PAGE_SECTORS_SHIFT;
+	page = radix_tree_delete(&brd->brd_pages, idx);
+	spin_unlock(&brd->brd_lock);
+	if (page)
+		__free_page(page);
+}
+
+static void brd_zero_page(struct brd_device *brd, sector_t sector)
+{
+	struct page *page;
+
+	page = brd_lookup_page(brd, sector);
+	if (page)
+		clear_highpage(page);
+}
+
 /*
  * Free all backing store pages and radix tree. This must only be called when
  * there are no other users of the device.
@@ -189,6 +211,24 @@ static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
 	return 0;
 }
 
+static void discard_from_brd(struct brd_device *brd,
+			sector_t sector, size_t n)
+{
+	while (n >= PAGE_SIZE) {
+		/*
+		 * Don't want to actually discard pages here because
+		 * re-allocating the pages can result in writeback
+		 * deadlocks under heavy load.
+		 */
+		if (0)
+			brd_free_page(brd, sector);
+		else
+			brd_zero_page(brd, sector);
+		sector += PAGE_SIZE >> SECTOR_SHIFT;
+		n -= PAGE_SIZE;
+	}
+}
+
 /*
  * Copy n bytes from src to the brd starting at sector. Does not sleep.
  */
@@ -300,6 +340,12 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
 			get_capacity(bdev->bd_disk))
 		goto out;
 
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
+		err = 0;
+		discard_from_brd(brd, sector, bio->bi_size);
+		goto out;
+	}
+
 	rw = bio_rw(bio);
 	if (rw == READA)
 		rw = READ;
@@ -320,7 +366,7 @@ out:
 }
 
 #ifdef CONFIG_BLK_DEV_XIP
-static int brd_direct_access (struct block_device *bdev, sector_t sector,
+static int brd_direct_access(struct block_device *bdev, sector_t sector,
 			void **kaddr, unsigned long *pfn)
 {
 	struct brd_device *brd = bdev->bd_disk->private_data;
@@ -437,6 +483,11 @@ static struct brd_device *brd_alloc(int i)
 	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
 	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
+	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
+	brd->brd_queue->limits.max_discard_sectors = UINT_MAX;
+	brd->brd_queue->limits.discard_zeroes_data = 1;
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
+
 	disk = brd->brd_disk = alloc_disk(1 << part_shift);
 	if (!disk)
 		goto out_free_queue;
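
As a quick sanity check (again, not part of the patch), the limits set at the
end of brd_alloc() are visible through the request queue's sysfs attributes.
The sketch below is an assumption-laden example: it assumes the device is
named ram0 and that the kernel exposes discard_granularity, discard_max_bytes
and discard_zeroes_data under /sys/block/ram0/queue/.

```c
/* Illustrative only; assumes the brd device is named ram0. Prints the
 * discard limits that brd_alloc() now advertises via the request queue. */
#include <stdio.h>

static void show(const char *attr)
{
	char path[256], buf[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/ram0/queue/%s", attr);
	f = fopen(path, "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("%s = %s", attr, buf);
	if (f)
		fclose(f);
}

int main(void)
{
	show("discard_granularity");	/* expected: PAGE_SIZE, e.g. 4096 */
	show("discard_max_bytes");	/* derived from max_discard_sectors */
	show("discard_zeroes_data");	/* expected: 1, since brd zeroes pages */
	return 0;
}
```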