The following commit has been merged in the master branch:
commit 371c141464b8312ee4a298fad6d17ee26654b7d6
Merge: ca83c61cb3db964061ea186654bf8e1879589de3 d0fa70aca54c8643248e89061da23752506ec0d4
Author: Linus Torvalds <torvalds@linux-foundation.org>
Date:   Tue Jul 23 15:15:16 2024 -0700
Merge tag 'jfs-6.11' of github.com:kleikamp/linux-shaggy
Pull jfs updates from David Kleikamp:
 "Folio conversion from Matthew Wilcox and a few various fixes"
* tag 'jfs-6.11' of github.com:kleikamp/linux-shaggy:
  jfs: don't walk off the end of ealist
  jfs: Fix shift-out-of-bounds in dbDiscardAG
  jfs: Fix array-index-out-of-bounds in diFree
  jfs: fix null ptr deref in dtInsertEntry
  jfs: Remove use of folio error flag
  fs: Remove i_blocks_per_page
  jfs: Change metapage->page to metapage->folio
  jfs: Convert force_metapage to use a folio
  jfs: Convert inc_io to take a folio
  jfs: Convert page_to_mp to folio_to_mp
  jfs; Convert __invalidate_metapages to use a folio
  jfs: Convert dec_io to take a folio
  jfs: Convert drop_metapage and remove_metapage to take a folio
  jfs; Convert release_metapage to use a folio
  jfs: Convert insert_metapage() to take a folio
  jfs: Convert __get_metapage to use a folio
  jfs: Convert metapage_writepage to metapage_write_folio
  jfs: Convert metapage_read_folio to use folio APIs
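The headline fix is "jfs: don't walk off the end of ealist": an on-disk
extended-attribute list is a length-prefixed sequence, and a corrupted
namelen/valuelen can send NEXT_EA() past the end of the buffer. The
program below is a minimal userspace sketch of the hardened walk that the
fs/jfs/xattr.c hunks below add. It is an illustration, not the kernel
code: the struct is simplified (the real jfs_ea uses a little-endian
valuelen and EA_SIZE() counts one extra byte), and next_ea()/find_ea()
are hypothetical stand-ins for the kernel's NEXT_EA()/__jfs_getxattr().

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified on-disk EA entry: header, then name bytes, then value bytes. */
struct jfs_ea {
	uint8_t  flag;
	uint8_t  namelen;	/* untrusted: read from disk */
	uint16_t valuelen;	/* untrusted: read from disk */
	char     name[];
};

static struct jfs_ea *next_ea(struct jfs_ea *ea)
{
	return (struct jfs_ea *)((char *)ea + sizeof(*ea) +
				 ea->namelen + ea->valuelen);
}

static struct jfs_ea *find_ea(void *list, size_t list_size,
			      const char *name, uint8_t namelen)
{
	struct jfs_ea *ea = list;
	struct jfs_ea *end = (struct jfs_ea *)((char *)list + list_size);

	while (ea < end) {
		/*
		 * Both checks matter: "ea + 1 > end" rejects a header that
		 * straddles the end of the buffer; "next_ea(ea) > end"
		 * rejects a namelen/valuelen pointing past it.  Without
		 * them, a crafted image walks the cursor out of bounds.
		 */
		if (ea + 1 > end || next_ea(ea) > end)
			return NULL;	/* corrupt list; JFS returns -EUCLEAN */
		if (ea->namelen == namelen &&
		    memcmp(ea->name, name, namelen) == 0)
			return ea;	/* found; JFS copies the value out */
		ea = next_ea(ea);
	}
	return NULL;			/* not found; JFS returns -ENODATA */
}

int main(void)
{
	/* One well-formed entry (little-endian host assumed): flag 0,
	 * namelen 4, valuelen 2, name "user", value "ok". */
	unsigned char buf[] = { 0, 4, 2, 0, 'u', 's', 'e', 'r', 'o', 'k' };

	printf("found: %s\n", find_ea(buf, sizeof(buf), "user", 4) ? "yes" : "no");
	return 0;
}

In the merged code the same pair of checks appears in both
__jfs_getxattr() and jfs_listxattr(), with the list end hoisted into an
ealist_end variable, as the hunks below show.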
diff --combined fs/jfs/xattr.c
index 9987055293b35,ab9b85ce7ff76..2999ed5d83f5e
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@@ -557,11 -557,9 +557,11 @@@ static int ea_get(struct inode *inode,
  size_check:
  	if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
 +		int size = min_t(int, EALIST_SIZE(ea_buf->xattr), ea_size);
 +
  		printk(KERN_ERR "ea_get: invalid extended attribute\n");
  		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
 -			       ea_buf->xattr, ea_size, 1);
 +			       ea_buf->xattr, size, 1);
  		ea_release(inode, ea_buf);
  		rc = -EIO;
  		goto clean_up;
@@@ -797,7 -795,7 +797,7 @@@ ssize_t __jfs_getxattr(struct inode *in
  		       size_t buf_size)
  {
  	struct jfs_ea_list *ealist;
- 	struct jfs_ea *ea;
+ 	struct jfs_ea *ea, *ealist_end;
  	struct ea_buffer ea_buf;
  	int xattr_size;
  	ssize_t size;
@@@ -817,9 -815,16 +817,16 @@@
  		goto not_found;
  	ealist = (struct jfs_ea_list *) ea_buf.xattr;
+ 	ealist_end = END_EALIST(ealist);
  	/* Find the named attribute */
- 	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
+ 	for (ea = FIRST_EA(ealist); ea < ealist_end; ea = NEXT_EA(ea)) {
+ 		if (unlikely(ea + 1 > ealist_end) ||
+ 		    unlikely(NEXT_EA(ea) > ealist_end)) {
+ 			size = -EUCLEAN;
+ 			goto release;
+ 		}
+ 
  		if ((namelen == ea->namelen) &&
  		    memcmp(name, ea->name, namelen) == 0) {
  			/* Found it */
@@@ -834,6 -839,7 +841,7 @@@
  			memcpy(data, value, size);
  			goto release;
  		}
+ 	}
  not_found:
  	size = -ENODATA;
  release:
@@@ -861,7 -867,7 +869,7 @@@ ssize_t jfs_listxattr(struct dentry * d
  	ssize_t size = 0;
  	int xattr_size;
  	struct jfs_ea_list *ealist;
- 	struct jfs_ea *ea;
+ 	struct jfs_ea *ea, *ealist_end;
  	struct ea_buffer ea_buf;
  	down_read(&JFS_IP(inode)->xattr_sem);
@@@ -876,9 -882,16 +884,16 @@@
  		goto release;
  	ealist = (struct jfs_ea_list *) ea_buf.xattr;
+ 	ealist_end = END_EALIST(ealist);
  	/* compute required size of list */
- 	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
+ 	for (ea = FIRST_EA(ealist); ea < ealist_end; ea = NEXT_EA(ea)) {
+ 		if (unlikely(ea + 1 > ealist_end) ||
+ 		    unlikely(NEXT_EA(ea) > ealist_end)) {
+ 			size = -EUCLEAN;
+ 			goto release;
+ 		}
+ 
  		if (can_list(ea))
  			size += name_size(ea) + 1;
  	}
diff --combined include/linux/pagemap.h
index 483a191bb4df4,63f2f3602a7ff..d9c7edb6422bd
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@@ -208,8 -208,7 +208,8 @@@ enum mapping_flags
  	AS_RELEASE_ALWAYS,	/* Call ->release_folio(), even if no private data */
  	AS_STABLE_WRITES,	/* must wait for writeback before modifying
  				   folio contents */
 -	AS_UNMOVABLE,		/* The mapping cannot be moved, ever */
 +	AS_INACCESSIBLE,	/* Do not attempt direct R/W access to the mapping,
 +				   including to move the mapping */
  };
  /**
@@@ -310,20 -309,20 +310,20 @@@ static inline void mapping_clear_stable
  	clear_bit(AS_STABLE_WRITES, &mapping->flags);
  }
 -static inline void mapping_set_unmovable(struct address_space *mapping)
 +static inline void mapping_set_inaccessible(struct address_space *mapping)
  {
  	/*
 -	 * It's expected unmovable mappings are also unevictable. Compaction
 +	 * It's expected inaccessible mappings are also unevictable. Compaction
  	 * migrate scanner (isolate_migratepages_block()) relies on this to
  	 * reduce page locking.
  	 */
  	set_bit(AS_UNEVICTABLE, &mapping->flags);
 -	set_bit(AS_UNMOVABLE, &mapping->flags);
 +	set_bit(AS_INACCESSIBLE, &mapping->flags);
  }
 -static inline bool mapping_unmovable(struct address_space *mapping)
 +static inline bool mapping_inaccessible(struct address_space *mapping)
  {
 -	return test_bit(AS_UNMOVABLE, &mapping->flags);
 +	return test_bit(AS_INACCESSIBLE, &mapping->flags);
  }
  static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
@@@ -347,26 -346,6 +347,26 @@@ static inline void mapping_set_gfp_mask
  	m->gfp_mask = mask;
  }
 +/*
 + * There are some parts of the kernel which assume that PMD entries
 + * are exactly HPAGE_PMD_ORDER.  Those should be fixed, but until then,
 + * limit the maximum allocation order to PMD size.  I'm not aware of any
 + * assumptions about maximum order if THP are disabled, but 8 seems like
 + * a good order (that's 1MB if you're using 4kB pages)
 + */
 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 +#define PREFERRED_MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
 +#else
 +#define PREFERRED_MAX_PAGECACHE_ORDER	8
 +#endif
 +
 +/*
 + * xas_split_alloc() does not support arbitrary orders. This implies no
 + * 512MB THP on ARM64 with 64KB base page size.
 + */
 +#define MAX_XAS_ORDER		(XA_CHUNK_SHIFT * 2 - 1)
 +#define MAX_PAGECACHE_ORDER	min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER)
 +
  /**
   * mapping_set_large_folios() - Indicate the file supports large folios.
   * @mapping: The file.
@@@ -389,22 -368,10 +389,22 @@@ static inline void mapping_set_large_fo
   */
  static inline bool mapping_large_folio_support(struct address_space *mapping)
  {
 +	/* AS_LARGE_FOLIO_SUPPORT is only reasonable for pagecache folios */
 +	VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON,
 +			"Anonymous mapping always supports large folio");
 +
  	return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
  		test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
  }
 +/* Return the maximum folio size for this pagecache mapping, in bytes. */
 +static inline size_t mapping_max_folio_size(struct address_space *mapping)
 +{
 +	if (mapping_large_folio_support(mapping))
 +		return PAGE_SIZE << MAX_PAGECACHE_ORDER;
 +	return PAGE_SIZE;
 +}
 +
  static inline int filemap_nr_thps(struct address_space *mapping)
  {
  #ifdef CONFIG_READ_ONLY_THP_FOR_FS
@@@ -434,6 -401,7 +434,6 @@@ static inline void filemap_nr_thps_dec(
  #endif
  }
 -struct address_space *page_mapping(struct page *);
  struct address_space *folio_mapping(struct folio *);
  struct address_space *swapcache_mapping(struct folio *);
@@@ -562,6 -530,19 +562,6 @@@ static inline void *detach_page_private
  	return folio_detach_private(page_folio(page));
  }
 -/*
 - * There are some parts of the kernel which assume that PMD entries
 - * are exactly HPAGE_PMD_ORDER.  Those should be fixed, but until then,
 - * limit the maximum allocation order to PMD size.  I'm not aware of any
 - * assumptions about maximum order if THP are disabled, but 8 seems like
 - * a good order (that's 1MB if you're using 4kB pages)
 - */
 -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 -#define MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
 -#else
 -#define MAX_PAGECACHE_ORDER	8
 -#endif
 -
  #ifdef CONFIG_NUMA
  struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
  #else
@@@ -799,7 -780,7 +799,7 @@@ static inline struct page *grab_cache_p
  			mapping_gfp_mask(mapping));
  }
 -#define swapcache_index(folio)	__page_file_index(&(folio)->page)
 +extern pgoff_t __folio_swap_cache_index(struct folio *folio);
  /**
   * folio_index - File index of a folio.
@@@ -814,9 -795,9 +814,9 @@@
   */
  static inline pgoff_t folio_index(struct folio *folio)
  {
 -	if (unlikely(folio_test_swapcache(folio)))
 -		return swapcache_index(folio);
 -	return folio->index;
 +	if (unlikely(folio_test_swapcache(folio)))
 +		return __folio_swap_cache_index(folio);
 +	return folio->index;
  }
  /**
@@@ -939,6 -920,11 +939,6 @@@ static inline loff_t page_offset(struc
  	return ((loff_t)page->index) << PAGE_SHIFT;
  }
 -static inline loff_t page_file_offset(struct page *page)
 -{
 -	return ((loff_t)page_index(page)) << PAGE_SHIFT;
 -}
 -
  /**
   * folio_pos - Returns the byte position of this folio in its file.
   * @folio: The folio.
@@@ -948,6 -934,18 +948,6 @@@ static inline loff_t folio_pos(struct f
  	return page_offset(&folio->page);
  }
 -/**
 - * folio_file_pos - Returns the byte position of this folio in its file.
 - * @folio: The folio.
 - *
 - * This differs from folio_pos() for folios which belong to a swap file.
 - * NFS is the only filesystem today which needs to use folio_file_pos().
 - */
 -static inline loff_t folio_file_pos(struct folio *folio)
 -{
 -	return page_file_offset(&folio->page);
 -}
 -
  /*
   * Get the offset in PAGE_SIZE (even for hugetlb folios).
   */
@@@ -1301,7 -1299,8 +1301,7 @@@ void page_cache_sync_readahead(struct a
   * @mapping: address_space which holds the pagecache and I/O vectors
   * @ra: file_ra_state which holds the readahead state
   * @file: Used by the filesystem for authentication.
 - * @folio: The folio at @index which triggered the readahead call.
 - * @index: Index of first page to be read.
 + * @folio: The folio which triggered the readahead call.
   * @req_count: Total number of pages being read by the caller.
   *
   * page_cache_async_readahead() should be called when a page is used which
@@@ -1312,9 -1311,9 +1312,9 @@@
  static inline
  void page_cache_async_readahead(struct address_space *mapping,
  				struct file_ra_state *ra, struct file *file,
 -				struct folio *folio, pgoff_t index, unsigned long req_count)
 +				struct folio *folio, unsigned long req_count)
  {
 -	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
 +	DEFINE_READAHEAD(ractl, file, ra, mapping, folio->index);
  	page_cache_async_ra(&ractl, folio, req_count);
  }
@@@ -1537,10 -1536,4 +1537,4 @@@ unsigned int i_blocks_per_folio(struct
  {
  	return folio_size(folio) >> inode->i_blkbits;
  }
- 
- static inline
- unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
- {
- 	return i_blocks_per_folio(inode, page_folio(page));
- }
  #endif /* _LINUX_PAGEMAP_H */
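A side note on the pagemap.h hunks: the merge resolves MAX_PAGECACHE_ORDER
moving up in the file and becoming min(MAX_XAS_ORDER,
PREFERRED_MAX_PAGECACHE_ORDER), so the largest page-cache folio is capped
both by PMD size and by what xas_split_alloc() can split. The standalone
program below is a sketch that replays that arithmetic with hard-coded
stand-ins for the kernel's constants (XA_CHUNK_SHIFT = 6; PMD orders 9 and
13 for 4KB x86-64 and 64KB arm64 respectively); max_folio_bytes() is a
hypothetical helper, not a kernel API.

#include <stdio.h>

#define XA_CHUNK_SHIFT	6				/* 64 slots per XArray node */
#define MAX_XAS_ORDER	(XA_CHUNK_SHIFT * 2 - 1)	/* 11 */

/* Mirrors mapping_max_folio_size() for a mapping with large folios enabled. */
static unsigned long max_folio_bytes(unsigned int page_shift,
				     unsigned int hpage_pmd_order)
{
	/* PREFERRED_MAX_PAGECACHE_ORDER is HPAGE_PMD_ORDER when THP is on */
	unsigned int order = hpage_pmd_order < MAX_XAS_ORDER ?
			     hpage_pmd_order : MAX_XAS_ORDER;

	return (1UL << page_shift) << order;
}

int main(void)
{
	/* x86-64, 4KB pages: PMD order 9 is the limit -> 2MB folios */
	printf("4KB pages:  %lu MB\n", max_folio_bytes(12, 9) >> 20);

	/* arm64, 64KB pages: PMD order is 13 (512MB), but xas_split_alloc()
	 * caps the order at 11 -> 128MB, hence "no 512MB THP" above */
	printf("64KB pages: %lu MB\n", max_folio_bytes(16, 13) >> 20);
	return 0;
}

With large folios disabled, mapping_max_folio_size() simply returns
PAGE_SIZE.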