Backport 5.10 LTS erofs to 4.19.

Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Change-Id: Ibf9c0c47e46090b72e75f09a347100f4ff64f28d
Signed-off-by: Cyber Knight <cyberknight755@gmail.com>
Signed-off-by: Ruchit <ruchitmarathe@gmail.com>
parent 92560b6348
commit 4bc5b4ae2e
@@ -1,13 +1,11 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
 
-EROFS_VERSION = "1.0pre1"
+EROFS_VERSION = "1.0"
 
-ccflags-y += -Wall -DEROFS_VERSION=\"$(EROFS_VERSION)\"
+ccflags-y += -DEROFS_VERSION=\"$(EROFS_VERSION)\"
 
 obj-$(CONFIG_EROFS_FS) += erofs.o
-# staging requirement: to be self-contained in its own directory
-ccflags-y += -I$(src)/include
 erofs-objs := super.o inode.o data.o namei.o dir.o utils.o
 erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
-erofs-$(CONFIG_EROFS_FS_ZIP) += unzip_vle.o unzip_lz4.o unzip_vle_lz4.o
+erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o
 
@@ -0,0 +1,58 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#ifndef __EROFS_FS_COMPRESS_H
#define __EROFS_FS_COMPRESS_H

#include "internal.h"

enum {
	Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX,
	Z_EROFS_COMPRESSION_RUNTIME_MAX
};

struct z_erofs_decompress_req {
	struct super_block *sb;
	struct page **in, **out;

	unsigned short pageofs_out;
	unsigned int inputsize, outputsize;

	/* indicate the algorithm to be used for decompression */
	unsigned int alg;
	bool inplace_io, partial_decoding;
};

/*
 * - 0x5A110C8D ('sallocated', Z_EROFS_MAPPING_STAGING) -
 * used to mark temporarily allocated pages from other
 * file/cached pages and NULL mapping pages.
 */
#define Z_EROFS_MAPPING_STAGING		((void *)0x5A110C8D)

/* check if a page is marked as staging */
static inline bool z_erofs_page_is_staging(struct page *page)
{
	return page->mapping == Z_EROFS_MAPPING_STAGING;
}

static inline bool z_erofs_put_stagingpage(struct list_head *pagepool,
					   struct page *page)
{
	if (!z_erofs_page_is_staging(page))
		return false;

	/* staging pages should not be used by others at the same time */
	if (page_ref_count(page) > 1)
		put_page(page);
	else
		list_add(&page->lru, pagepool);
	return true;
}

int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct list_head *pagepool);

#endif
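Editor's note: the helpers above assume the caller owns a local pagepool list that staging pages are threaded onto via page->lru. A minimal caller-side sketch, not part of this commit and with the cleanup placement purely illustrative:

	/* hypothetical cleanup once decompression and I/O have finished */
	LIST_HEAD(pagepool);
	struct page *page, *tmp;

	/* ... z_erofs_decompress() may add staging pages to &pagepool ... */
	list_for_each_entry_safe(page, tmp, &pagepool, lru) {
		list_del(&page->lru);
		page->mapping = NULL;	/* drop the Z_EROFS_MAPPING_STAGING mark */
		put_page(page);
	}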
@@ -0,0 +1,344 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
#endif

struct z_erofs_decompressor {
	/*
	 * if destpages have sparse pages, fill them with bounce pages.
	 * it also checks whether destpages indicate continuous physical memory.
	 */
	int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
				 struct list_head *pagepool);
	int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
	char *name;
};

static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
					 struct list_head *pagepool)
{
	const unsigned int nr =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < nr; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= LZ4_MAX_DISTANCE_PAGES)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < LZ4_MAX_DISTANCE_PAGES);
			DBG_BUGON(top >= LZ4_MAX_DISTANCE_PAGES);
			availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (kaddr) {
				if (kaddr + PAGE_SIZE == page_address(page))
					kaddr += PAGE_SIZE;
				else
					kaddr = NULL;
			} else if (!i) {
				kaddr = page_address(page);
			}
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool, GFP_KERNEL);
			if (!victim)
				return -ENOMEM;
			victim->mapping = Z_EROFS_MAPPING_STAGING;
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}

static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
				       u8 *src, unsigned int pageofs_in)
{
	/*
	 * if in-place decompression is ongoing, those decompressed
	 * pages should be copied in order to avoid being overlapped.
	 */
	struct page **in = rq->in;
	u8 *const tmp = erofs_get_pcpubuf(0);
	u8 *tmpp = tmp;
	unsigned int inlen = rq->inputsize - pageofs_in;
	unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in);

	while (tmpp < tmp + inlen) {
		if (!src)
			src = kmap_atomic(*in);
		memcpy(tmpp, src + pageofs_in, count);
		kunmap_atomic(src);
		src = NULL;
		tmpp += count;
		pageofs_in = 0;
		count = PAGE_SIZE;
		++in;
	}
	return tmp;
}

static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
{
	unsigned int inputmargin, inlen;
	u8 *src;
	bool copied, support_0padding;
	int ret;

	if (rq->inputsize > PAGE_SIZE)
		return -EOPNOTSUPP;

	src = kmap_atomic(*rq->in);
	inputmargin = 0;
	support_0padding = false;

	/* decompression inplace is only safe when 0padding is enabled */
	if (EROFS_SB(rq->sb)->feature_incompat &
	    EROFS_FEATURE_INCOMPAT_LZ4_0PADDING) {
		support_0padding = true;

		while (!src[inputmargin & ~PAGE_MASK])
			if (!(++inputmargin & ~PAGE_MASK))
				break;

		if (inputmargin >= rq->inputsize) {
			kunmap_atomic(src);
			return -EIO;
		}
	}

	copied = false;
	inlen = rq->inputsize - inputmargin;
	if (rq->inplace_io) {
		const uint oend = (rq->pageofs_out +
				   rq->outputsize) & ~PAGE_MASK;
		const uint nr = PAGE_ALIGN(rq->pageofs_out +
					   rq->outputsize) >> PAGE_SHIFT;

		if (rq->partial_decoding || !support_0padding ||
		    rq->out[nr - 1] != rq->in[0] ||
		    rq->inputsize - oend <
		      LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) {
			src = generic_copy_inplace_data(rq, src, inputmargin);
			inputmargin = 0;
			copied = true;
		}
	}

	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
						  inlen, rq->outputsize,
						  rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  inlen, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, inlen, inputmargin, rq->outputsize);

		WARN_ON(1);
		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, inlen, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	}

	if (copied)
		erofs_put_pcpubuf(src);
	else
		kunmap_atomic(src);
	return ret;
}

static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.prepare_destpages = z_erofs_lz4_prepare_destpages,
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
};

static void copy_from_pcpubuf(struct page **out, const char *dst,
			      unsigned short pageofs_out,
			      unsigned int outputsize)
{
	const char *end = dst + outputsize;
	const unsigned int righthalf = PAGE_SIZE - pageofs_out;
	const char *cur = dst - pageofs_out;

	while (cur < end) {
		struct page *const page = *out++;

		if (page) {
			char *buf = kmap_atomic(page);

			if (cur >= dst) {
				memcpy(buf, cur, min_t(uint, PAGE_SIZE,
						       end - cur));
			} else {
				memcpy(buf + pageofs_out, cur + pageofs_out,
				       min_t(uint, righthalf, end - cur));
			}
			kunmap_atomic(buf);
		}
		cur += PAGE_SIZE;
	}
}

static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
				      struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const struct z_erofs_decompressor *alg = decompressors + rq->alg;
	unsigned int dst_maptype;
	void *dst;
	int ret, i;

	if (nrpages_out == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_atomic(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/*
	 * For cases of small output size (especially much less than
	 * PAGE_SIZE), memcpying the decompressed data rather than the
	 * compressed data is preferred.
	 */
	if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
		dst = erofs_get_pcpubuf(0);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		rq->inplace_io = false;
		ret = alg->decompress(rq, dst);
		if (!ret)
			copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
					  rq->outputsize);

		erofs_put_pcpubuf(dst);
		return ret;
	}

	ret = alg->prepare_destpages(rq, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
		goto dstmap_out;
	}

	i = 0;
	while (1) {
		dst = vm_map_ram(rq->out, nrpages_out, -1, PAGE_KERNEL);

		/* retry two more times (3 times in total) */
		if (dst || ++i >= 3)
			break;
		vm_unmap_aliases();
	}

	if (!dst)
		return -ENOMEM;

	dst_maptype = 2;

dstmap_out:
	ret = alg->decompress(rq, dst + rq->pageofs_out);

	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, nrpages_out);
	return ret;
}

static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
				     struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
	unsigned char *src, *dst;

	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	if (rq->out[0] == *rq->in) {
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in);
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			memmove(src, src + righthalf, rq->pageofs_out);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, rq->pageofs_out);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}

int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct list_head *pagepool)
{
	if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED)
		return z_erofs_shifted_transform(rq, pagepool);
	return z_erofs_decompress_generic(rq, pagepool);
}
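Editor's note: a hedged sketch of how z_erofs_decompress() is driven. The field values are illustrative only, and the surrounding variables (sb, the page vectors, pagepool) are assumed to be set up by the caller:

	struct z_erofs_decompress_req rq = {
		.sb = sb,
		.in = compressed_pages,		/* source page vector */
		.out = out_pages,		/* destination page vector */
		.pageofs_out = pageofs,		/* byte offset within out_pages[0] */
		.inputsize = PAGE_SIZE,		/* the LZ4 path handles at most one page */
		.outputsize = outputsize,
		.alg = Z_EROFS_COMPRESSION_LZ4,
		.inplace_io = false,
		.partial_decoding = false,
	};
	int err = z_erofs_decompress(&rq, &pagepool);

The table-driven decompressors[] array keeps per-algorithm hooks in one place, while the SHIFTED (uncompressed) case is short-circuited in z_erofs_decompress() itself since it needs no prepare_destpages step.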
@@ -1,227 +0,0 @@
#ifndef __LZ4DEFS_H__
#define __LZ4DEFS_H__

/*
 * lz4defs.h -- common and architecture specific defines for the kernel usage
 *
 * LZ4 - Fast LZ compression algorithm
 * Copyright (C) 2011-2016, Yann Collet.
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *	* Redistributions of source code must retain the above copyright
 *	  notice, this list of conditions and the following disclaimer.
 *	* Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following disclaimer
 *	  in the documentation and/or other materials provided with the
 *	  distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * You can contact the author at :
 *	- LZ4 homepage : http://www.lz4.org
 *	- LZ4 source repository : https://github.com/lz4/lz4
 *
 * Changed for kernel usage by:
 *	Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
 */

#include <asm/unaligned.h>
#include <linux/string.h>	/* memset, memcpy */

#define FORCE_INLINE __always_inline

/*-************************************
 *	Basic Types
 **************************************/
#include <linux/types.h>

typedef uint8_t BYTE;
typedef uint16_t U16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef uintptr_t uptrval;

/*-************************************
 *	Architecture specifics
 **************************************/
#if defined(CONFIG_64BIT)
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
#endif

#if defined(__LITTLE_ENDIAN)
#define LZ4_LITTLE_ENDIAN 1
#else
#define LZ4_LITTLE_ENDIAN 0
#endif

/*-************************************
 *	Constants
 **************************************/
#define MINMATCH 4

#define WILDCOPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)

/* Increase this value ==> compression run slower on incompressible data */
#define LZ4_SKIPTRIGGER 6

#define HASH_UNIT sizeof(size_t)

#define KB (1 << 10)
#define MB (1 << 20)
#define GB (1U << 30)

#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
#define STEPSIZE sizeof(size_t)

#define ML_BITS	4
#define ML_MASK	((1U << ML_BITS) - 1)
#define RUN_BITS (8 - ML_BITS)
#define RUN_MASK ((1U << RUN_BITS) - 1)

/*-************************************
 *	Reading and writing into memory
 **************************************/
static FORCE_INLINE U16 LZ4_read16(const void *ptr)
{
	return get_unaligned((const U16 *)ptr);
}

static FORCE_INLINE U32 LZ4_read32(const void *ptr)
{
	return get_unaligned((const U32 *)ptr);
}

static FORCE_INLINE size_t LZ4_read_ARCH(const void *ptr)
{
	return get_unaligned((const size_t *)ptr);
}

static FORCE_INLINE void LZ4_write16(void *memPtr, U16 value)
{
	put_unaligned(value, (U16 *)memPtr);
}

static FORCE_INLINE void LZ4_write32(void *memPtr, U32 value)
{
	put_unaligned(value, (U32 *)memPtr);
}

static FORCE_INLINE U16 LZ4_readLE16(const void *memPtr)
{
	return get_unaligned_le16(memPtr);
}

static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
{
	return put_unaligned_le16(value, memPtr);
}

static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
{
#if LZ4_ARCH64
	U64 a = get_unaligned((const U64 *)src);

	put_unaligned(a, (U64 *)dst);
#else
	U32 a = get_unaligned((const U32 *)src);
	U32 b = get_unaligned((const U32 *)src + 1);

	put_unaligned(a, (U32 *)dst);
	put_unaligned(b, (U32 *)dst + 1);
#endif
}

/*
 * customized variant of memcpy,
 * which can overwrite up to 7 bytes beyond dstEnd
 */
static FORCE_INLINE void LZ4_wildCopy(void *dstPtr,
	const void *srcPtr, void *dstEnd)
{
	BYTE *d = (BYTE *)dstPtr;
	const BYTE *s = (const BYTE *)srcPtr;
	BYTE *const e = (BYTE *)dstEnd;

	do {
		LZ4_copy8(d, s);
		d += 8;
		s += 8;
	} while (d < e);
}

static FORCE_INLINE unsigned int LZ4_NbCommonBytes(register size_t val)
{
#if LZ4_LITTLE_ENDIAN
	return __ffs(val) >> 3;
#else
	return (BITS_PER_LONG - 1 - __fls(val)) >> 3;
#endif
}

static FORCE_INLINE unsigned int LZ4_count(
	const BYTE *pIn,
	const BYTE *pMatch,
	const BYTE *pInLimit)
{
	const BYTE *const pStart = pIn;

	while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
		size_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);

		if (!diff) {
			pIn += STEPSIZE;
			pMatch += STEPSIZE;
			continue;
		}

		pIn += LZ4_NbCommonBytes(diff);

		return (unsigned int)(pIn - pStart);
	}

#if LZ4_ARCH64
	if ((pIn < (pInLimit - 3))
		&& (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
		pIn += 4;
		pMatch += 4;
	}
#endif

	if ((pIn < (pInLimit - 1))
		&& (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
		pIn += 2;
		pMatch += 2;
	}

	if ((pIn < pInLimit) && (*pMatch == *pIn))
		pIn++;

	return (unsigned int)(pIn - pStart);
}

typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive;
typedef enum { byPtr, byU32, byU16 } tableType_t;

typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;

typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
typedef enum { full = 0, partial = 1 } earlyEnd_directive;

#endif
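Editor's note, a worked instance of LZ4_NbCommonBytes() above: on a 64-bit little-endian machine, if the XOR of the two 8-byte words read by LZ4_read_ARCH() is 0x0000000000ff0000, the lowest set bit is bit 16, so __ffs(val) returns 16 and 16 >> 3 = 2, meaning the first two bytes matched; LZ4_count() then advances pIn by exactly those 2 bytes before returning.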
@@ -1,251 +0,0 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
 * linux/drivers/staging/erofs/unzip_lz4.c
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * Original code taken from 'linux/lib/lz4/lz4_decompress.c'
 */

/*
 * LZ4 - Fast LZ compression algorithm
 * Copyright (C) 2011 - 2016, Yann Collet.
 * BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php)
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *	* Redistributions of source code must retain the above copyright
 *	  notice, this list of conditions and the following disclaimer.
 *	* Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following disclaimer
 *	  in the documentation and/or other materials provided with the
 *	  distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * You can contact the author at :
 *	- LZ4 homepage : http://www.lz4.org
 *	- LZ4 source repository : https://github.com/lz4/lz4
 *
 * Changed for kernel usage by:
 *	Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
 */
#include "internal.h"
#include <asm/unaligned.h>
#include "lz4defs.h"

/*
 * no public solution to solve our requirement yet.
 * see: <required buffer size for LZ4_decompress_safe_partial>
 *      https://groups.google.com/forum/#!topic/lz4c/_3kkz5N6n00
 */
static FORCE_INLINE int customized_lz4_decompress_safe_partial(
	const void * const source,
	void * const dest,
	int inputSize,
	int outputSize)
{
	/* Local Variables */
	const BYTE *ip = (const BYTE *) source;
	const BYTE * const iend = ip + inputSize;

	BYTE *op = (BYTE *) dest;
	BYTE * const oend = op + outputSize;
	BYTE *cpy;

	static const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
	static const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 };

	/* Empty output buffer */
	if (unlikely(outputSize == 0))
		return ((inputSize == 1) && (*ip == 0)) ? 0 : -1;

	/* Main Loop : decode sequences */
	while (1) {
		size_t length;
		const BYTE *match;
		size_t offset;

		/* get literal length */
		unsigned int const token = *ip++;

		length = token>>ML_BITS;

		if (length == RUN_MASK) {
			unsigned int s;

			do {
				s = *ip++;
				length += s;
			} while ((ip < iend - RUN_MASK) & (s == 255));

			if (unlikely((size_t)(op + length) < (size_t)(op))) {
				/* overflow detection */
				goto _output_error;
			}
			if (unlikely((size_t)(ip + length) < (size_t)(ip))) {
				/* overflow detection */
				goto _output_error;
			}
		}

		/* copy literals */
		cpy = op + length;
		if ((cpy > oend - WILDCOPYLENGTH) ||
			(ip + length > iend - (2 + 1 + LASTLITERALS))) {
			if (cpy > oend) {
				memcpy(op, ip, length = oend - op);
				op += length;
				break;
			}

			if (unlikely(ip + length > iend)) {
				/*
				 * Error :
				 * read attempt beyond
				 * end of input buffer
				 */
				goto _output_error;
			}

			memcpy(op, ip, length);
			ip += length;
			op += length;

			if (ip > iend - 2)
				break;
			/* Necessarily EOF, due to parsing restrictions */
			/* break; */
		} else {
			LZ4_wildCopy(op, ip, cpy);
			ip += length;
			op = cpy;
		}

		/* get offset */
		offset = LZ4_readLE16(ip);
		ip += 2;
		match = op - offset;

		if (unlikely(match < (const BYTE *)dest)) {
			/* Error : offset outside buffers */
			goto _output_error;
		}

		/* get matchlength */
		length = token & ML_MASK;
		if (length == ML_MASK) {
			unsigned int s;

			do {
				s = *ip++;

				if (ip > iend - LASTLITERALS)
					goto _output_error;

				length += s;
			} while (s == 255);

			if (unlikely((size_t)(op + length) < (size_t)op)) {
				/* overflow detection */
				goto _output_error;
			}
		}

		length += MINMATCH;

		/* copy match within block */
		cpy = op + length;

		if (unlikely(cpy >= oend - WILDCOPYLENGTH)) {
			if (cpy >= oend) {
				while (op < oend)
					*op++ = *match++;
				break;
			}
			goto __match;
		}

		/* costs ~1%; silence an msan warning when offset == 0 */
		LZ4_write32(op, (U32)offset);

		if (unlikely(offset < 8)) {
			const int dec64 = dec64table[offset];

			op[0] = match[0];
			op[1] = match[1];
			op[2] = match[2];
			op[3] = match[3];
			match += dec32table[offset];
			memcpy(op + 4, match, 4);
			match -= dec64;
		} else {
			LZ4_copy8(op, match);
			match += 8;
		}

		op += 8;

		if (unlikely(cpy > oend - 12)) {
			BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);

			if (op < oCopyLimit) {
				LZ4_wildCopy(op, match, oCopyLimit);
				match += oCopyLimit - op;
				op = oCopyLimit;
			}
__match:
			while (op < cpy)
				*op++ = *match++;
		} else {
			LZ4_copy8(op, match);

			if (length > 16)
				LZ4_wildCopy(op + 8, match + 8, cpy);
		}

		op = cpy; /* correction */
	}
	DBG_BUGON((void *)ip - source > inputSize);
	DBG_BUGON((void *)op - dest > outputSize);

	/* Nb of output bytes decoded */
	return (int) ((void *)op - dest);

	/* Overflow error detected */
_output_error:
	return -ERANGE;
}

int z_erofs_unzip_lz4(void *in, void *out, size_t inlen, size_t outlen)
{
	int ret = customized_lz4_decompress_safe_partial(in,
		out, inlen, outlen);

	if (ret >= 0)
		return ret;

	/*
	 * LZ4_decompress_safe will return an error code
	 * (< 0) if decompression failed
	 */
	errln("%s, failed to decompress, in[%p, %zu] outlen[%p, %zu]",
	      __func__, in, inlen, out, outlen);
	WARN_ON(1);
	print_hex_dump(KERN_DEBUG, "raw data [in]: ", DUMP_PREFIX_OFFSET,
		       16, 1, in, inlen, true);
	print_hex_dump(KERN_DEBUG, "raw data [out]: ", DUMP_PREFIX_OFFSET,
		       16, 1, out, outlen, true);
	return -EIO;
}
(File diff suppressed because it is too large.)
@@ -1,210 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/unzip_vle_lz4.c
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include "unzip_vle.h"

#if Z_EROFS_CLUSTER_MAX_PAGES > Z_EROFS_VLE_INLINE_PAGEVECS
#define EROFS_PERCPU_NR_PAGES	Z_EROFS_CLUSTER_MAX_PAGES
#else
#define EROFS_PERCPU_NR_PAGES	Z_EROFS_VLE_INLINE_PAGEVECS
#endif

static struct {
	char data[PAGE_SIZE * EROFS_PERCPU_NR_PAGES];
} erofs_pcpubuf[NR_CPUS];

int z_erofs_vle_plain_copy(struct page **compressed_pages,
			   unsigned clusterpages,
			   struct page **pages,
			   unsigned nr_pages,
			   unsigned short pageofs)
{
	unsigned i, j;
	void *src = NULL;
	const unsigned righthalf = PAGE_SIZE - pageofs;
	char *percpu_data;
	bool mirrored[Z_EROFS_CLUSTER_MAX_PAGES] = { 0 };

	preempt_disable();
	percpu_data = erofs_pcpubuf[smp_processor_id()].data;

	j = 0;
	for (i = 0; i < nr_pages; j = i++) {
		struct page *page = pages[i];
		void *dst;

		if (page == NULL) {
			if (src != NULL) {
				if (!mirrored[j])
					kunmap_atomic(src);
				src = NULL;
			}
			continue;
		}

		dst = kmap_atomic(page);

		for (; j < clusterpages; ++j) {
			if (compressed_pages[j] != page)
				continue;

			DBG_BUGON(mirrored[j]);
			memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
			mirrored[j] = true;
			break;
		}

		if (i) {
			if (src == NULL)
				src = mirrored[i-1] ?
					percpu_data + (i-1) * PAGE_SIZE :
					kmap_atomic(compressed_pages[i-1]);

			memcpy(dst, src + righthalf, pageofs);

			if (!mirrored[i-1])
				kunmap_atomic(src);

			if (unlikely(i >= clusterpages)) {
				kunmap_atomic(dst);
				break;
			}
		}

		if (!righthalf)
			src = NULL;
		else {
			src = mirrored[i] ? percpu_data + i * PAGE_SIZE :
				kmap_atomic(compressed_pages[i]);

			memcpy(dst + pageofs, src, righthalf);
		}

		kunmap_atomic(dst);
	}

	if (src != NULL && !mirrored[j])
		kunmap_atomic(src);

	preempt_enable();
	return 0;
}

extern int z_erofs_unzip_lz4(void *in, void *out, size_t inlen, size_t outlen);

int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
				  unsigned clusterpages,
				  struct page **pages,
				  unsigned outlen,
				  unsigned short pageofs)
{
	void *vin, *vout;
	unsigned nr_pages, i, j;
	int ret;

	if (outlen + pageofs > EROFS_PERCPU_NR_PAGES * PAGE_SIZE)
		return -ENOTSUPP;

	nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);

	if (clusterpages == 1) {
		vin = kmap_atomic(compressed_pages[0]);
	} else {
		vin = erofs_vmap(compressed_pages, clusterpages);
		if (!vin)
			return -ENOMEM;
	}

	preempt_disable();
	vout = erofs_pcpubuf[smp_processor_id()].data;

	ret = z_erofs_unzip_lz4(vin, vout + pageofs,
				clusterpages * PAGE_SIZE, outlen);

	if (ret < 0)
		goto out;
	ret = 0;

	for (i = 0; i < nr_pages; ++i) {
		j = min((unsigned)PAGE_SIZE - pageofs, outlen);

		if (pages[i] != NULL) {
			if (clusterpages == 1 &&
			    pages[i] == compressed_pages[0]) {
				memcpy(vin + pageofs, vout + pageofs, j);
			} else {
				void *dst = kmap_atomic(pages[i]);

				memcpy(dst + pageofs, vout + pageofs, j);
				kunmap_atomic(dst);
			}
		}
		vout += PAGE_SIZE;
		outlen -= j;
		pageofs = 0;
	}

out:
	preempt_enable();

	if (clusterpages == 1)
		kunmap_atomic(vin);
	else
		erofs_vunmap(vin, clusterpages);

	return ret;
}

int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
			   unsigned clusterpages,
			   void *vout,
			   unsigned llen,
			   unsigned short pageofs,
			   bool overlapped)
{
	void *vin;
	unsigned i;
	int ret;

	if (overlapped) {
		preempt_disable();
		vin = erofs_pcpubuf[smp_processor_id()].data;

		for (i = 0; i < clusterpages; ++i) {
			void *t = kmap_atomic(compressed_pages[i]);

			memcpy(vin + PAGE_SIZE * i, t, PAGE_SIZE);
			kunmap_atomic(t);
		}
	} else if (clusterpages == 1) {
		vin = kmap_atomic(compressed_pages[0]);
	} else {
		vin = erofs_vmap(compressed_pages, clusterpages);
	}

	ret = z_erofs_unzip_lz4(vin, vout + pageofs,
				clusterpages * PAGE_SIZE, llen);
	if (ret > 0)
		ret = 0;

	if (!overlapped) {
		if (clusterpages == 1)
			kunmap_atomic(vin);
		else
			erofs_vunmap(vin, clusterpages);
	} else {
		preempt_enable();
	}

	return ret;
}
(File diff suppressed because it is too large.)
@@ -0,0 +1,188 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#ifndef __EROFS_FS_ZDATA_H
#define __EROFS_FS_ZDATA_H

#include "internal.h"
#include "zpvec.h"

#define Z_EROFS_NR_INLINE_PAGEVECS      3

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by pageset lock;
 *
 * A: Field should be accessed / updated in atomic for parallelized code.
 */
struct z_erofs_collection {
	struct mutex lock;

	/* I: page offset of start position of decompression */
	unsigned short pageofs;

	/* L: maximum relative page index in pagevec[] */
	unsigned short nr_pages;

	/* L: total number of pages in pagevec[] */
	unsigned int vcnt;

	union {
		/* L: inline a certain number of pagevecs for bootstrap */
		erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];

		/* I: can be used to free the pcluster by RCU. */
		struct rcu_head rcu;
	};
};

#define Z_EROFS_PCLUSTER_FULL_LENGTH    0x00000001
#define Z_EROFS_PCLUSTER_LENGTH_BIT     1

/*
 * let's leave a type here in case of introducing
 * another tagged pointer later.
 */
typedef void *z_erofs_next_pcluster_t;

struct z_erofs_pcluster {
	struct erofs_workgroup obj;
	struct z_erofs_collection primary_collection;

	/* A: point to next chained pcluster or TAILs */
	z_erofs_next_pcluster_t next;

	/* A: compressed pages (including multi-usage pages) */
	struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];

	/* A: lower limit of decompressed length and if full length or not */
	unsigned int length;

	/* I: compression algorithm format */
	unsigned char algorithmformat;
	/* I: bit shift of physical cluster size */
	unsigned char clusterbits;
};

#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)

/* let's avoid the valid 32-bit kernel addresses */

/* the chained workgroup hasn't submitted io (still open) */
#define Z_EROFS_PCLUSTER_TAIL           ((void *)0x5F0ECAFE)
/* the chained workgroup has already submitted io */
#define Z_EROFS_PCLUSTER_TAIL_CLOSED    ((void *)0x5F0EDEAD)

#define Z_EROFS_PCLUSTER_NIL            (NULL)

#define Z_EROFS_WORKGROUP_SIZE          sizeof(struct z_erofs_pcluster)

struct z_erofs_decompressqueue {
	struct super_block *sb;
	atomic_t pending_bios;
	z_erofs_next_pcluster_t head;

	union {
		wait_queue_head_t wait;
		struct work_struct work;
	} u;
};

#define MNGD_MAPPING(sbi)	((sbi)->managed_cache->i_mapping)
static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
					 struct page *page)
{
	return page->mapping == MNGD_MAPPING(sbi);
}

#define Z_EROFS_ONLINEPAGE_COUNT_BITS   2
#define Z_EROFS_ONLINEPAGE_COUNT_MASK   ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT  (Z_EROFS_ONLINEPAGE_COUNT_BITS)

/*
 * waiters (aka. ongoing_packs): # to unlock the page
 * sub-index: 0 - for partial page, >= 1 full page sub-index
 */
typedef atomic_t z_erofs_onlinepage_t;

/* type punning */
union z_erofs_onlinepage_converter {
	z_erofs_onlinepage_t *o;
	unsigned long *v;
};

static inline unsigned int z_erofs_onlinepage_index(struct page *page)
{
	union z_erofs_onlinepage_converter u;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
}

static inline void z_erofs_onlinepage_init(struct page *page)
{
	union {
		z_erofs_onlinepage_t o;
		unsigned long v;
	/* keep from being unlocked in advance */
	} u = { .o = ATOMIC_INIT(1) };

	set_page_private(page, u.v);
	smp_wmb();
	SetPagePrivate(page);
}

static inline void z_erofs_onlinepage_fixup(struct page *page,
	uintptr_t index, bool down)
{
	union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
	int orig, orig_index, val;

repeat:
	orig = atomic_read(u.o);
	orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
	if (orig_index) {
		if (!index)
			return;

		DBG_BUGON(orig_index != index);
	}

	val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
		((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
	if (atomic_cmpxchg(u.o, orig, val) != orig)
		goto repeat;
}

static inline void z_erofs_onlinepage_endio(struct page *page)
{
	union z_erofs_onlinepage_converter u;
	unsigned int v;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	v = atomic_dec_return(u.o);
	if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
		ClearPagePrivate(page);
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o));
}

#define Z_EROFS_VMAP_ONSTACK_PAGES	\
	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
#define Z_EROFS_VMAP_GLOBAL_PAGES	2048

#endif
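Editor's note, to make the online-page packing above concrete: page_private() holds (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | count, where the low Z_EROFS_ONLINEPAGE_COUNT_BITS = 2 bits count pending sub-I/Os. A hedged illustration of the state transitions (the values in the comments are worked examples, not taken from the commit):

	z_erofs_onlinepage_init(page);			/* private = (0 << 2) | 1 */
	z_erofs_onlinepage_fixup(page, 5, true);	/* private = (5 << 2) | 2 */
	z_erofs_onlinepage_endio(page);			/* count 2 -> 1, page stays locked */
	z_erofs_onlinepage_endio(page);			/* count 1 -> 0: SetPageUptodate + unlock_page */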
@@ -0,0 +1,477 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
#include <asm/unaligned.h>
#include <trace/events/erofs.h>

int z_erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);

	if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
		vi->z_advise = 0;
		vi->z_algorithmtype[0] = 0;
		vi->z_algorithmtype[1] = 0;
		vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
		vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits;
		vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits;
		set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
	}

	inode->i_mapping->a_ops = &z_erofs_aops;
	return 0;
}

static int z_erofs_fill_inode_lazy(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	int err;
	erofs_off_t pos;
	struct page *page;
	void *kaddr;
	struct z_erofs_map_header *h;

	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	err = 0;
	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
		goto out_unlock;

	DBG_BUGON(vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY);

	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
		    vi->xattr_isize, 8);
	page = erofs_get_meta_page(sb, erofs_blknr(pos));
	if (IS_ERR(page)) {
		err = PTR_ERR(page);
		goto out_unlock;
	}

	kaddr = kmap_atomic(page);

	h = kaddr + erofs_blkoff(pos);
	vi->z_advise = le16_to_cpu(h->h_advise);
	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;

	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) {
		erofs_err(sb, "unknown compression format %u for nid %llu, please upgrade kernel",
			  vi->z_algorithmtype[0], vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_done;
	}

	vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
	vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits +
					((h->h_clusterbits >> 3) & 3);

	if (vi->z_physical_clusterbits[0] != LOG_BLOCK_SIZE) {
		erofs_err(sb, "unsupported physical clusterbits %u for nid %llu, please upgrade kernel",
			  vi->z_physical_clusterbits[0], vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_done;
	}

	vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
					((h->h_clusterbits >> 5) & 7);
	/* paired with smp_mb() at the beginning of the function */
	smp_mb();
	set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
unmap_done:
	kunmap_atomic(kaddr);
	unlock_page(page);
	put_page(page);
out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
	return err;
}

struct z_erofs_maprecorder {
	struct inode *inode;
	struct erofs_map_blocks *map;
	void *kaddr;

	unsigned long lcn;
	/* compression extent information gathered */
	u8  type;
	u16 clusterofs;
	u16 delta[2];
	erofs_blk_t pblk;
};

static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
				  erofs_blk_t eblk)
{
	struct super_block *const sb = m->inode->i_sb;
	struct erofs_map_blocks *const map = m->map;
	struct page *mpage = map->mpage;

	if (mpage) {
		if (mpage->index == eblk) {
			if (!m->kaddr)
				m->kaddr = kmap_atomic(mpage);
			return 0;
		}

		if (m->kaddr) {
			kunmap_atomic(m->kaddr);
			m->kaddr = NULL;
		}
		put_page(mpage);
	}

	mpage = erofs_get_meta_page(sb, eblk);
	if (IS_ERR(mpage)) {
		map->mpage = NULL;
		return PTR_ERR(mpage);
	}
	m->kaddr = kmap_atomic(mpage);
	unlock_page(mpage);
	map->mpage = mpage;
	return 0;
}

static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					 unsigned long lcn)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid);
	const erofs_off_t pos =
		Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
					       vi->xattr_isize) +
		lcn * sizeof(struct z_erofs_vle_decompressed_index);
	struct z_erofs_vle_decompressed_index *di;
	unsigned int advise, type;
	int err;

	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;

	m->lcn = lcn;
	di = m->kaddr + erofs_blkoff(pos);

	advise = le16_to_cpu(di->di_advise);
	type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
		((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
	switch (type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		m->clusterofs = 1 << vi->z_logical_clusterbits;
		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		m->clusterofs = le16_to_cpu(di->di_clusterofs);
		m->pblk = le32_to_cpu(di->di_u.blkaddr);
		break;
	default:
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	m->type = type;
	return 0;
}

static unsigned int decode_compactedbits(unsigned int lobits,
					 unsigned int lomask,
					 u8 *in, unsigned int pos, u8 *type)
{
	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
	const unsigned int lo = v & lomask;

	*type = (v >> lobits) & 3;
	return lo;
}

static int unpack_compacted_index(struct z_erofs_maprecorder *m,
				  unsigned int amortizedshift,
				  unsigned int eofs)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int vcnt, base, lo, encodebits, nblk;
	int i;
	u8 *in, type;

	if (1 << amortizedshift == 4)
		vcnt = 2;
	else if (1 << amortizedshift == 2 && lclusterbits == 12)
		vcnt = 16;
	else
		return -EOPNOTSUPP;

	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
	base = round_down(eofs, vcnt << amortizedshift);
	in = m->kaddr + base;

	i = (eofs - base) >> amortizedshift;

	lo = decode_compactedbits(lclusterbits, lomask,
				  in, encodebits * i, &type);
	m->type = type;
	if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << lclusterbits;
		if (i + 1 != vcnt) {
			m->delta[0] = lo;
			return 0;
		}
		/*
		 * since the last lcluster in the pack is special,
		 * of which lo saves delta[1] rather than delta[0].
		 * Hence, get delta[0] from the previous lcluster indirectly.
		 */
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * (i - 1), &type);
		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			lo = 0;
		m->delta[0] = lo + 1;
		return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;
	/* figure out blkaddr (pblk) for HEAD lclusters */
	nblk = 1;
	while (i > 0) {
		--i;
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * i, &type);
		if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			i -= lo;

		if (i >= 0)
			++nblk;
	}
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}

static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					    unsigned long lcn)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
					vi->inode_isize + vi->xattr_isize, 8) +
		sizeof(struct z_erofs_map_header);
	const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
	unsigned int compacted_4b_initial, compacted_2b;
	unsigned int amortizedshift;
	erofs_off_t pos;
	int err;

	if (lclusterbits != 12)
		return -EOPNOTSUPP;

	if (lcn >= totalidx)
		return -EINVAL;

	m->lcn = lcn;
	/* used to align to 32-byte (compacted_2b) alignment */
	compacted_4b_initial = (32 - ebase % 32) / 4;
	if (compacted_4b_initial == 32 / 4)
		compacted_4b_initial = 0;

	if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
	else
		compacted_2b = 0;

	pos = ebase;
	if (lcn < compacted_4b_initial) {
		amortizedshift = 2;
		goto out;
	}
	pos += compacted_4b_initial * 4;
	lcn -= compacted_4b_initial;

	if (lcn < compacted_2b) {
		amortizedshift = 1;
		goto out;
	}
	pos += compacted_2b * 2;
	lcn -= compacted_2b;
	amortizedshift = 2;
out:
	pos += lcn * (1 << amortizedshift);
	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;
	return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos));
}

static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					  unsigned int lcn)
{
	const unsigned int datamode = EROFS_I(m->inode)->datalayout;

	if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
		return legacy_load_cluster_from_disk(m, lcn);

	if (datamode == EROFS_INODE_FLAT_COMPRESSION)
		return compacted_load_cluster_from_disk(m, lcn);

	return -EINVAL;
}

static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
				   unsigned int lookback_distance)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn = m->lcn;
	int err;

	if (lcn < lookback_distance) {
		erofs_err(m->inode->i_sb,
			  "bogus lookback distance @ nid %llu", vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	/* load extent head logical cluster if needed */
	lcn -= lookback_distance;
	err = z_erofs_load_cluster_from_disk(m, lcn);
	if (err)
		return err;

	switch (m->type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		if (!m->delta[0]) {
			erofs_err(m->inode->i_sb,
				  "invalid lookback distance 0 @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		return z_erofs_extent_lookback(m, m->delta[0]);
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		map->m_flags &= ~EROFS_MAP_ZIPPED;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		map->m_la = (lcn << lclusterbits) | m->clusterofs;
		break;
	default:
		erofs_err(m->inode->i_sb,
			  "unknown type %u @ lcn %lu of nid %llu",
			  m->type, lcn, vi->nid);
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

int z_erofs_map_blocks_iter(struct inode *inode,
			    struct erofs_map_blocks *map,
			    int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct z_erofs_maprecorder m = {
		.inode = inode,
		.map = map,
	};
	int err = 0;
	unsigned int lclusterbits, endoff;
	unsigned long long ofs, end;

	trace_z_erofs_map_blocks_iter_enter(inode, map, flags);

	/* when trying to read beyond EOF, leave it unmapped */
	if (map->m_la >= inode->i_size) {
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size;
		map->m_flags = 0;
		goto out;
	}

	err = z_erofs_fill_inode_lazy(inode);
	if (err)
		goto out;

	lclusterbits = vi->z_logical_clusterbits;
	ofs = map->m_la;
	m.lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_cluster_from_disk(&m, m.lcn);
	if (err)
		goto unmap_out;

	map->m_flags = EROFS_MAP_ZIPPED;	/* by default, compressed */
	end = (m.lcn + 1ULL) << lclusterbits;

	switch (m.type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		if (endoff >= m.clusterofs)
			map->m_flags &= ~EROFS_MAP_ZIPPED;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		if (endoff >= m.clusterofs) {
			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
			break;
		}
		/* m.lcn should be >= 1 if endoff < m.clusterofs */
		if (!m.lcn) {
			erofs_err(inode->i_sb,
				  "invalid logical cluster 0 at nid %llu",
				  vi->nid);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		end = (m.lcn << lclusterbits) | m.clusterofs;
		map->m_flags |= EROFS_MAP_FULL_MAPPED;
		m.delta[0] = 1;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto unmap_out;
		break;
	default:
		erofs_err(inode->i_sb,
			  "unknown type %u @ offset %llu of nid %llu",
			  m.type, ofs, vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_out;
	}

	map->m_llen = end - map->m_la;
	map->m_plen = 1 << lclusterbits;
	map->m_pa = blknr_to_addr(m.pblk);
	map->m_flags |= EROFS_MAP_MAPPED;

unmap_out:
	if (m.kaddr)
		kunmap_atomic(m.kaddr);

out:
	erofs_dbg("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
		  __func__, map->m_la, map->m_pa,
		  map->m_llen, map->m_plen, map->m_flags);

	trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);

	/* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
	DBG_BUGON(err < 0 && err != -ENOMEM);
	return err;
}
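Editor's note, a worked instance of the compacted-index arithmetic in unpack_compacted_index(): with lclusterbits = 12 and the 2-byte amortized layout (amortizedshift = 1), vcnt = 16, so encodebits = ((16 << 1) - sizeof(__le32)) * 8 / 16 = (32 - 4) * 8 / 16 = 14 bits per lcluster, which is exactly the 12-bit lo field plus the 2-bit cluster type; the remaining 4 bytes at the end of each 32-byte pack hold the le32 block address base that m->pblk is derived from.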