[F][PATCH 6/7] btrfs: use larger zlib buffer for s390 hardware compression
Stefan Bader
stefan.bader at canonical.com
Fri Feb 28 15:46:42 UTC 2020
On 18.02.20 14:12, frank.heimes at canonical.com wrote:
> From: Mikhail Zaslonko <zaslonko at linux.ibm.com>
>
> BugLink: https://bugs.launchpad.net/bugs/1830208
>
> In order to benefit from s390 zlib hardware compression support,
> increase the btrfs zlib workspace buffer size from 1 to 4 pages (if s390
> zlib hardware support is enabled on the machine).
>
> This brings up to 60% better performance in hardware on s390 compared to
> the PAGE_SIZE buffer and much more compared to the software zlib
> processing in btrfs. In case of memory pressure, fall back to a single
> page buffer during workspace allocation.
>
> The data compressed with larger input buffers will still conform to the zlib
> standard and thus can also be decompressed on systems that use only a
> PAGE_SIZE buffer for btrfs zlib.
>
> Link: http://lkml.kernel.org/r/20200108105103.29028-1-zaslonko@linux.ibm.com
> Signed-off-by: Mikhail Zaslonko <zaslonko at linux.ibm.com>
> Reviewed-by: David Sterba <dsterba at suse.com>
> Cc: Chris Mason <clm at fb.com>
> Cc: Josef Bacik <josef at toxicpanda.com>
> Cc: David Sterba <dsterba at suse.com>
> Cc: Richard Purdie <rpurdie at rpsys.net>
> Cc: Heiko Carstens <heiko.carstens at de.ibm.com>
> Cc: Vasily Gorbik <gor at linux.ibm.com>
> Cc: Christian Borntraeger <borntraeger at de.ibm.com>
> Cc: Eduard Shishkin <edward6 at linux.ibm.com>
> Cc: Ilya Leoshkevich <iii at linux.ibm.com>
> Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
> Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
> (cherry picked from commit 3fd396afc05fc90097276c6b7a70c406ad4df5bb)
> Signed-off-by: Frank Heimes <frank.heimes at canonical.com>
> ---
Same as with patch #1: a bit hard to tell whether the changes are right. But,
as with that one, it is upstream without any follow-up fixes so far. Maybe a
little less well tested, as I am not sure how many people run btrfs with
compression turned on... and even if they do, who runs btrfs anyway...
So, meh...
> fs/btrfs/compression.c | 2 +-
> fs/btrfs/zlib.c | 135 ++++++++++++++++++++++++++++++-----------
> 2 files changed, 101 insertions(+), 36 deletions(-)
>
> diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
> index b05b361e2062..f789b356fd8b 100644
> --- a/fs/btrfs/compression.c
> +++ b/fs/btrfs/compression.c
> @@ -1158,7 +1158,7 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
> /* copy bytes from the working buffer into the pages */
> while (working_bytes > 0) {
> bytes = min_t(unsigned long, bvec.bv_len,
> - PAGE_SIZE - buf_offset);
> + PAGE_SIZE - (buf_offset % PAGE_SIZE));
> bytes = min(bytes, working_bytes);
>
> kaddr = kmap_atomic(bvec.bv_page);
> diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
> index df1aace5df50..d27aab97c684 100644
> --- a/fs/btrfs/zlib.c
> +++ b/fs/btrfs/zlib.c
> @@ -20,9 +20,13 @@
> #include <linux/refcount.h>
> #include "compression.h"
>
> +/* workspace buffer size for s390 zlib hardware support */
> +#define ZLIB_DFLTCC_BUF_SIZE (4 * PAGE_SIZE)
> +
> struct workspace {
> z_stream strm;
> char *buf;
> + unsigned int buf_size;
> struct list_head list;
> int level;
> };
> @@ -76,7 +80,21 @@ static struct list_head *zlib_alloc_workspace(unsigned int level)
> zlib_inflate_workspacesize());
> workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
> workspace->level = level;
> - workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
> + workspace->buf = NULL;
> +	/*
> +	 * In case of s390 zlib hardware support, allocate larger workspace
> +	 * buffer. If the allocation fails, fall back to a single page buffer.
> +	 */
> + if (zlib_deflate_dfltcc_enabled()) {
> + workspace->buf = kmalloc(ZLIB_DFLTCC_BUF_SIZE,
> + __GFP_NOMEMALLOC | __GFP_NORETRY |
> + __GFP_NOWARN | GFP_NOIO);
> + workspace->buf_size = ZLIB_DFLTCC_BUF_SIZE;
> + }
> + if (!workspace->buf) {
> + workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
> + workspace->buf_size = PAGE_SIZE;
> + }
> if (!workspace->strm.workspace || !workspace->buf)
> goto fail;
>
> @@ -104,6 +122,7 @@ static int zlib_compress_pages(struct list_head *ws,
> struct page *in_page = NULL;
> struct page *out_page = NULL;
> unsigned long bytes_left;
> + unsigned int in_buf_pages;
> unsigned long len = *total_out;
> unsigned long nr_dest_pages = *out_pages;
> const unsigned long max_out = nr_dest_pages * PAGE_SIZE;
> @@ -121,9 +140,6 @@ static int zlib_compress_pages(struct list_head *ws,
> workspace->strm.total_in = 0;
> workspace->strm.total_out = 0;
>
> - in_page = find_get_page(mapping, start >> PAGE_SHIFT);
> - data_in = kmap(in_page);
> -
> out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
> if (out_page == NULL) {
> ret = -ENOMEM;
> @@ -133,12 +149,51 @@ static int zlib_compress_pages(struct list_head *ws,
> pages[0] = out_page;
> nr_pages = 1;
>
> - workspace->strm.next_in = data_in;
> + workspace->strm.next_in = workspace->buf;
> + workspace->strm.avail_in = 0;
> workspace->strm.next_out = cpage_out;
> workspace->strm.avail_out = PAGE_SIZE;
> - workspace->strm.avail_in = min(len, PAGE_SIZE);
>
> while (workspace->strm.total_in < len) {
> + /*
> + * Get next input pages and copy the contents to
> + * the workspace buffer if required.
> + */
> + if (workspace->strm.avail_in == 0) {
> + bytes_left = len - workspace->strm.total_in;
> + in_buf_pages = min(DIV_ROUND_UP(bytes_left, PAGE_SIZE),
> + workspace->buf_size / PAGE_SIZE);
> + if (in_buf_pages > 1) {
> + int i;
> +
> + for (i = 0; i < in_buf_pages; i++) {
> + if (in_page) {
> + kunmap(in_page);
> + put_page(in_page);
> + }
> + in_page = find_get_page(mapping,
> + start >> PAGE_SHIFT);
> + data_in = kmap(in_page);
> + memcpy(workspace->buf + i * PAGE_SIZE,
> + data_in, PAGE_SIZE);
> + start += PAGE_SIZE;
> + }
> + workspace->strm.next_in = workspace->buf;
> + } else {
> + if (in_page) {
> + kunmap(in_page);
> + put_page(in_page);
> + }
> + in_page = find_get_page(mapping,
> + start >> PAGE_SHIFT);
> + data_in = kmap(in_page);
> + start += PAGE_SIZE;
> + workspace->strm.next_in = data_in;
> + }
> + workspace->strm.avail_in = min(bytes_left,
> + (unsigned long) workspace->buf_size);
> + }
> +
> ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
> if (ret != Z_OK) {
> pr_debug("BTRFS: deflate in loop returned %d\n",
> @@ -180,33 +235,43 @@ static int zlib_compress_pages(struct list_head *ws,
> /* we're all done */
> if (workspace->strm.total_in >= len)
> break;
> -
> - /* we've read in a full page, get a new one */
> - if (workspace->strm.avail_in == 0) {
> - if (workspace->strm.total_out > max_out)
> - break;
> -
> - bytes_left = len - workspace->strm.total_in;
> - kunmap(in_page);
> - put_page(in_page);
> -
> - start += PAGE_SIZE;
> - in_page = find_get_page(mapping,
> - start >> PAGE_SHIFT);
> - data_in = kmap(in_page);
> - workspace->strm.avail_in = min(bytes_left,
> - PAGE_SIZE);
> - workspace->strm.next_in = data_in;
> - }
> + if (workspace->strm.total_out > max_out)
> + break;
> }
> workspace->strm.avail_in = 0;
> - ret = zlib_deflate(&workspace->strm, Z_FINISH);
> - zlib_deflateEnd(&workspace->strm);
> -
> - if (ret != Z_STREAM_END) {
> - ret = -EIO;
> - goto out;
> + /*
> + * Call deflate with Z_FINISH flush parameter providing more output
> + * space but no more input data, until it returns with Z_STREAM_END.
> + */
> + while (ret != Z_STREAM_END) {
> + ret = zlib_deflate(&workspace->strm, Z_FINISH);
> + if (ret == Z_STREAM_END)
> + break;
> + if (ret != Z_OK && ret != Z_BUF_ERROR) {
> + zlib_deflateEnd(&workspace->strm);
> + ret = -EIO;
> + goto out;
> + } else if (workspace->strm.avail_out == 0) {
> + /* get another page for the stream end */
> + kunmap(out_page);
> + if (nr_pages == nr_dest_pages) {
> + out_page = NULL;
> + ret = -E2BIG;
> + goto out;
> + }
> + out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
> + if (out_page == NULL) {
> + ret = -ENOMEM;
> + goto out;
> + }
> + cpage_out = kmap(out_page);
> + pages[nr_pages] = out_page;
> + nr_pages++;
> + workspace->strm.avail_out = PAGE_SIZE;
> + workspace->strm.next_out = cpage_out;
> + }
> }
> + zlib_deflateEnd(&workspace->strm);
>
> if (workspace->strm.total_out >= workspace->strm.total_in) {
> ret = -E2BIG;
> @@ -250,7 +315,7 @@ static int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
>
> workspace->strm.total_out = 0;
> workspace->strm.next_out = workspace->buf;
> - workspace->strm.avail_out = PAGE_SIZE;
> + workspace->strm.avail_out = workspace->buf_size;
>
> /* If it's deflate, and it's got no preset dictionary, then
> we can tell zlib to skip the adler32 check. */
> @@ -289,7 +354,7 @@ static int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
> }
>
> workspace->strm.next_out = workspace->buf;
> - workspace->strm.avail_out = PAGE_SIZE;
> + workspace->strm.avail_out = workspace->buf_size;
>
> if (workspace->strm.avail_in == 0) {
> unsigned long tmp;
> @@ -340,7 +405,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
> workspace->strm.total_in = 0;
>
> workspace->strm.next_out = workspace->buf;
> - workspace->strm.avail_out = PAGE_SIZE;
> + workspace->strm.avail_out = workspace->buf_size;
> workspace->strm.total_out = 0;
> /* If it's deflate, and it's got no preset dictionary, then
> we can tell zlib to skip the adler32 check. */
> @@ -384,7 +449,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
> buf_offset = 0;
>
> bytes = min(PAGE_SIZE - pg_offset,
> - PAGE_SIZE - buf_offset);
> + PAGE_SIZE - (buf_offset % PAGE_SIZE));
> bytes = min(bytes, bytes_left);
>
> kaddr = kmap_atomic(dest_page);
> @@ -395,7 +460,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
> bytes_left -= bytes;
> next:
> workspace->strm.next_out = workspace->buf;
> - workspace->strm.avail_out = PAGE_SIZE;
> + workspace->strm.avail_out = workspace->buf_size;
> }
>
> if (ret != Z_STREAM_END && bytes_left != 0)
>
-------------- next part --------------
A non-text attachment was scrubbed...
Name: signature.asc
Type: application/pgp-signature
Size: 833 bytes
Desc: OpenPGP digital signature
URL: <https://lists.ubuntu.com/archives/kernel-team/attachments/20200228/bc74b4e9/attachment.sig>
More information about the kernel-team
mailing list