diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
index f0eddccbdd954154c67e28b1db8239b764e97c2b..e2fa577b66fe0ace93722e303aa54de7f8c47fc4 100644
--- a/fs/afs/dir_edit.c
+++ b/fs/afs/dir_edit.c
@@ -115,11 +115,12 @@ static struct folio *afs_dir_get_folio(struct afs_vnode *vnode, pgoff_t index)
 
 	folio = __filemap_get_folio(mapping, index,
 				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 				    mapping->gfp_mask);
-	if (IS_ERR(folio))
+	if (IS_ERR(folio)) {
 		clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
-	else if (folio && !folio_test_private(folio))
+		return NULL;
+	}
+	if (!folio_test_private(folio))
 		folio_attach_private(folio, (void *)1);
-
 	return folio;
 }
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 798a2c1b38c6c4948865700e5e9b01a634f2475e..7a8f166f2c8d841129b2208dd4bf701f9b885deb 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -67,20 +67,28 @@ int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level,
 
 	down_read(&bmap->b_sem);
 	ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp);
-	if (ret < 0) {
-		ret = nilfs_bmap_convert_error(bmap, __func__, ret);
+	if (ret < 0)
 		goto out;
-	}
+
 	if (NILFS_BMAP_USE_VBN(bmap)) {
 		ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp,
 					  &blocknr);
 		if (!ret)
 			*ptrp = blocknr;
+		else if (ret == -ENOENT) {
+			/*
+			 * If there was no valid entry in DAT for the block
+			 * address obtained by b_ops->bop_lookup, then pass
+			 * internal code -EINVAL to nilfs_bmap_convert_error
+			 * to treat it as metadata corruption.
+			 */
+			ret = -EINVAL;
+		}
 	}
 
  out:
 	up_read(&bmap->b_sem);
-	return ret;
+	return nilfs_bmap_convert_error(bmap, __func__, ret);
 }
 
 int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp,
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 228659612c0d76a67243755fc3512839c561e98c..ac949fd7603ff22f4c5654c3ae2ab9c06ccd92bd 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2041,6 +2041,9 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
 	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
 	int err;
 
+	if (sb_rdonly(sci->sc_super))
+		return -EROFS;
+
 	nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
 	sci->sc_cno = nilfs->ns_cno;
 
@@ -2724,7 +2727,7 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
 
 		flush_work(&sci->sc_iput_work);
 
-	} while (ret && retrycount-- > 0);
+	} while (ret && ret != -EROFS && retrycount-- > 0);
 }
 
 /**
diff --git a/mm/mmap.c b/mm/mmap.c
index 5522130ae6065d256c8cd80616378137c417132f..13678edaa22c9005437a53965d6feccd457c5769 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -960,17 +960,17 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
 		merge_next = true;
 	}
 
+	/* Verify some invariant that must be enforced by the caller. */
+	VM_WARN_ON(prev && addr <= prev->vm_start);
+	VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
+	VM_WARN_ON(addr >= end);
+
 	if (!merge_prev && !merge_next)
 		return NULL; /* Not mergeable. */
 
 	res = vma = prev;
 	remove = remove2 = adjust = NULL;
 
-	/* Verify some invariant that must be enforced by the caller. */
-	VM_WARN_ON(prev && addr <= prev->vm_start);
-	VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
-	VM_WARN_ON(addr >= end);
-
 	/* Can we merge both the predecessor and the successor? */
 	if (merge_prev && merge_next &&
 	    is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5bde0740930335319afed277d16c52b2325403eb..d257916f39e50027dc7c06aa38c19430ee9dc4ee 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1967,6 +1967,16 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 			}
 		}
 
+		/*
+		 * Folio is unmapped now so it cannot be newly pinned anymore.
+		 * No point in trying to reclaim folio if it is pinned.
+		 * Furthermore we don't want to reclaim underlying fs metadata
+		 * if the folio is pinned and thus potentially modified by the
+		 * pinning process as that may upset the filesystem.
+		 */
+		if (folio_maybe_dma_pinned(folio))
+			goto activate_locked;
+
 		mapping = folio_mapping(folio);
 		if (folio_test_dirty(folio)) {
 			/*