@@ -119,44 +119,85 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
 	return kaddr ? 1 : 0;
 }
 
-static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
-				       u8 *src, unsigned int pageofs_in)
+static void *z_erofs_handle_inplace_io(struct z_erofs_decompress_req *rq,
+			void *inpage, unsigned int *inputmargin, int *maptype,
+			bool support_0padding)
 {
-	/*
-	 * if in-place decompression is ongoing, those decompressed
-	 * pages should be copied in order to avoid being overlapped.
-	 */
-	struct page **in = rq->in;
-	u8 *const tmp = erofs_get_pcpubuf(1);
-	u8 *tmpp = tmp;
-	unsigned int inlen = rq->inputsize - pageofs_in;
-	unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in);
-
-	while (tmpp < tmp + inlen) {
-		if (!src)
-			src = kmap_atomic(*in);
-		memcpy(tmpp, src + pageofs_in, count);
-		kunmap_atomic(src);
-		src = NULL;
-		tmpp += count;
-		pageofs_in = 0;
-		count = PAGE_SIZE;
+	unsigned int nrpages_in, nrpages_out;
+	unsigned int ofull, oend, inputsize, total, i, j;
+	struct page **in;
+	void *src, *tmp;
+
+	inputsize = rq->inputsize;
+	nrpages_in = PAGE_ALIGN(inputsize) >> PAGE_SHIFT;
+	oend = rq->pageofs_out + rq->outputsize;
+	ofull = PAGE_ALIGN(oend);
+	nrpages_out = ofull >> PAGE_SHIFT;
+
+	if (rq->inplace_io) {
+		if (rq->partial_decoding || !support_0padding ||
+		    ofull - oend < LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize))
+			goto docopy;
+
+		for (i = 0; i < nrpages_in; ++i) {
+			DBG_BUGON(rq->in[i] == NULL);
+			for (j = 0; j < nrpages_out - nrpages_in + i; ++j)
+				if (rq->out[j] == rq->in[i])
+					goto docopy;
+		}
+	}
+
+	if (nrpages_in <= 1) {
+		*maptype = 0;
+		return inpage;
+	}
+	kunmap_atomic(inpage);
+	might_sleep();
+	src = erofs_vm_map_ram(rq->in, nrpages_in);
+	if (!src)
+		return ERR_PTR(-ENOMEM);
+	*maptype = 1;
+	return src;
+
+docopy:
+	/* Or copy compressed data which can be overlapped to per-CPU buffer */
+	in = rq->in;
+	src = erofs_get_pcpubuf(nrpages_in);
+	if (!src) {
+		DBG_BUGON(1);
+		kunmap_atomic(inpage);
+		return ERR_PTR(-EFAULT);
+	}
+
+	tmp = src;
+	total = rq->inputsize;
+	while (total) {
+		unsigned int page_copycnt =
+			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);
+
+		if (!inpage)
+			inpage = kmap_atomic(*in);
+		memcpy(tmp, inpage + *inputmargin, page_copycnt);
+		kunmap_atomic(inpage);
+		inpage = NULL;
+		tmp += page_copycnt;
+		total -= page_copycnt;
+		++in;
+		*inputmargin = 0;
 	}
-	return tmp;
+	*maptype = 2;
+	return src;
 }
 
 static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
 {
-	unsigned int inputmargin, inlen;
-	u8 *src;
-	bool copied, support_0padding;
-	int ret;
-
-	if (rq->inputsize > PAGE_SIZE)
-		return -EOPNOTSUPP;
+	unsigned int inputmargin;
+	u8 *headpage, *src;
+	bool support_0padding;
+	int ret, maptype;
 
-	src = kmap_atomic(*rq->in);
+	DBG_BUGON(*rq->in == NULL);
+	headpage = kmap_atomic(*rq->in);
 	inputmargin = 0;
 	support_0padding = false;
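
(Aside, not part of the patch: the new helper reports through *maptype how the compressed data ended up mapped, and the caller must undo exactly that mapping once decoding finishes. Below is a minimal sketch of that pairing with a hypothetical helper name; the patch itself open-codes this at the end of z_erofs_lz4_decompress() further down.)

/* Illustrative sketch only: how each *maptype value is meant to be released. */
static void z_erofs_inplace_io_cleanup_sketch(void *src, int maptype,
					      unsigned int inputsize)
{
	if (maptype == 0)	/* single head page, still kmap_atomic()ed */
		kunmap_atomic(src);
	else if (maptype == 1)	/* pages vmapped by erofs_vm_map_ram() */
		vm_unmap_ram(src, PAGE_ALIGN(inputsize) >> PAGE_SHIFT);
	else if (maptype == 2)	/* copied into a per-CPU bounce buffer */
		erofs_put_pcpubuf(src);
}
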
@@ -164,50 +205,37 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
 	if (erofs_sb_has_lz4_0padding(EROFS_SB(rq->sb))) {
 		support_0padding = true;
 
-		while (!src[inputmargin & ~PAGE_MASK])
+		while (!headpage[inputmargin & ~PAGE_MASK])
 			if (!(++inputmargin & ~PAGE_MASK))
 				break;
 
 		if (inputmargin >= rq->inputsize) {
-			kunmap_atomic(src);
+			kunmap_atomic(headpage);
 			return -EIO;
 		}
 	}
 
-	copied = false;
-	inlen = rq->inputsize - inputmargin;
-	if (rq->inplace_io) {
-		const uint oend = (rq->pageofs_out +
-				   rq->outputsize) & ~PAGE_MASK;
-		const uint nr = PAGE_ALIGN(rq->pageofs_out +
-					   rq->outputsize) >> PAGE_SHIFT;
-
-		if (rq->partial_decoding || !support_0padding ||
-		    rq->out[nr - 1] != rq->in[0] ||
-		    rq->inputsize - oend <
-		      LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) {
-			src = generic_copy_inplace_data(rq, src, inputmargin);
-			inputmargin = 0;
-			copied = true;
-		}
-	}
+	rq->inputsize -= inputmargin;
+	src = z_erofs_handle_inplace_io(rq, headpage, &inputmargin, &maptype,
+					support_0padding);
+	if (IS_ERR(src))
+		return PTR_ERR(src);
 
 	/* legacy format could compress extra data in a pcluster. */
 	if (rq->partial_decoding || !support_0padding)
 		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
-						  inlen, rq->outputsize,
-						  rq->outputsize);
+				rq->inputsize, rq->outputsize, rq->outputsize);
 	else
 		ret = LZ4_decompress_safe(src + inputmargin, out,
-					  inlen, rq->outputsize);
+					  rq->inputsize, rq->outputsize);
 
 	if (ret != rq->outputsize) {
 		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
-			  ret, inlen, inputmargin, rq->outputsize);
+			  ret, rq->inputsize, inputmargin, rq->outputsize);
 		WARN_ON(1);
 		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
-			       16, 1, src + inputmargin, inlen, true);
+			       16, 1, src + inputmargin, rq->inputsize, true);
 		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
 			       16, 1, out, rq->outputsize, true);
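
(Aside, not part of the patch: the 0padding handling above only skips leading zero bytes within the head page, so that src + inputmargin points at the first byte of the LZ4 stream, and rq->inputsize is shrunk by the same amount before the in-place decision. A standalone sketch of that scan, with a hypothetical helper name and no kernel dependencies:)

/* Sketch: count the leading zero bytes (0padding) in one page of compressed data. */
static unsigned int lz4_0padding_margin(const unsigned char *page,
					unsigned int pagesize)
{
	unsigned int margin = 0;

	while (margin < pagesize && !page[margin])
		margin++;
	return margin;	/* the LZ4 stream starts at page + margin */
}
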
@@ -216,10 +244,16 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
 		ret = -EIO;
 	}
 
-	if (copied)
-		erofs_put_pcpubuf(src);
-	else
+	if (maptype == 0) {
 		kunmap_atomic(src);
+	} else if (maptype == 1) {
+		vm_unmap_ram(src, PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT);
+	} else if (maptype == 2) {
+		erofs_put_pcpubuf(src);
+	} else {
+		DBG_BUGON(1);
+		return -EFAULT;
+	}
 	return ret;
 }
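
(Aside, not part of the patch: the in-place fast path in z_erofs_handle_inplace_io() only decodes in place when the slack between the end of the decompressed output (oend) and the end of its last page (ofull) is at least LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize), and no input page also appears earlier in rq->out[]. Assuming the usual LZ4 margin formula ((srcsize >> 8) + 32) and 4 KiB pages, a worked check:)

/*
 * Worked example (assumed macro: LZ4_DECOMPRESS_INPLACE_MARGIN(s) = (s >> 8) + 32):
 *
 *   inputsize = 16384                  ->  margin = (16384 >> 8) + 32 = 96 bytes
 *   oend  = pageofs_out + outputsize   =   61552
 *   ofull = PAGE_ALIGN(oend)           =   65536
 *
 *   ofull - oend = 3984 >= 96, so in-place decompression is permitted here.
 */
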
@@ -269,57 +303,51 @@ static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
 	const struct z_erofs_decompressor *alg = decompressors + rq->alg;
 	unsigned int dst_maptype;
 	void *dst;
-	int ret, i;
+	int ret;
 
-	if (nrpages_out == 1 && !rq->inplace_io) {
-		DBG_BUGON(!*rq->out);
-		dst = kmap_atomic(*rq->out);
-		dst_maptype = 0;
-		goto dstmap_out;
-	}
+	/* two optimized fast paths only for non bigpcluster cases yet */
+	if (rq->inputsize <= PAGE_SIZE) {
+		if (nrpages_out == 1 && !rq->inplace_io) {
+			DBG_BUGON(!*rq->out);
+			dst = kmap_atomic(*rq->out);
+			dst_maptype = 0;
+			goto dstmap_out;
+		}
 
-	/*
-	 * For the case of small output size (especially much less
-	 * than PAGE_SIZE), memcpy the decompressed data rather than
-	 * compressed data is preferred.
-	 */
-	if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
-		dst = erofs_get_pcpubuf(1);
-		if (IS_ERR(dst))
-			return PTR_ERR(dst);
-
-		rq->inplace_io = false;
-		ret = alg->decompress(rq, dst);
-		if (!ret)
-			copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
-					  rq->outputsize);
-
-		erofs_put_pcpubuf(dst);
-		return ret;
+		/*
+		 * For the case of small output size (especially much less
+		 * than PAGE_SIZE), memcpy the decompressed data rather than
+		 * compressed data is preferred.
+		 */
+		if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
+			dst = erofs_get_pcpubuf(1);
+			if (IS_ERR(dst))
+				return PTR_ERR(dst);
+
+			rq->inplace_io = false;
+			ret = alg->decompress(rq, dst);
+			if (!ret)
+				copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
+						  rq->outputsize);
+
+			erofs_put_pcpubuf(dst);
+			return ret;
+		}
 	}
 
 	/* general decoding path which can be used for all cases */
 	ret = alg->prepare_destpages(rq, pagepool);
-	if (ret < 0) {
+	if (ret < 0)
 		return ret;
-	} else if (ret) {
+	if (ret) {
 		dst = page_address(*rq->out);
 		dst_maptype = 1;
 		goto dstmap_out;
 	}
 
-	i = 0;
-	while (1) {
-		dst = vm_map_ram(rq->out, nrpages_out, -1, PAGE_KERNEL);
-
-		/* retry two more times (totally 3 times) */
-		if (dst || ++i >= 3)
-			break;
-		vm_unmap_aliases();
-	}
+	dst = erofs_vm_map_ram(rq->out, nrpages_out);
 	if (!dst)
 		return -ENOMEM;
 	dst_maptype = 2;
 
 dstmap_out:
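
(Aside, not part of the patch: the vm_map_ram() retry loop removed above is presumably what the new erofs_vm_map_ram() helper encapsulates; its body lies outside this hunk, so the following is only a sketch reconstructed from the removed lines.)

/* Sketch of erofs_vm_map_ram(), inferred from the removed retry loop above. */
static void *erofs_vm_map_ram(struct page **pages, unsigned int count)
{
	int i = 0;

	while (1) {
		void *p = vm_map_ram(pages, count, -1, PAGE_KERNEL);

		/* retry two more times (totally 3 times) */
		if (p || ++i >= 3)
			return p;
		vm_unmap_aliases();
	}
}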