diff --git a/sys/kern/kern_sendfile.c b/sys/kern/kern_sendfile.c --- a/sys/kern/kern_sendfile.c +++ b/sys/kern/kern_sendfile.c @@ -518,8 +518,7 @@ static int sendfile_getobj(struct thread *td, struct file *fp, vm_object_t *obj_res, - struct vnode **vp_res, struct shmfd **shmfd_res, off_t *obj_size, - int *bsize) + struct vnode **vp_res, struct shmfd **shmfd_res, off_t *obj_size) { vm_object_t obj; struct vnode *vp; @@ -530,7 +529,6 @@ vp = *vp_res = NULL; obj = NULL; shmfd = *shmfd_res = NULL; - *bsize = 0; /* * The file descriptor must be a regular file and have a @@ -543,7 +541,6 @@ error = EINVAL; goto out; } - *bsize = vp->v_mount->mnt_stat.f_iosize; obj = vp->v_object; if (obj == NULL) { error = EINVAL; @@ -717,7 +714,7 @@ struct shmfd *shmfd; struct vattr va; off_t off, sbytes, rem, obj_size, nobj_size; - int bsize, error, ext_pgs_idx, hdrlen, max_pgs, softerr; + int error, ext_pgs_idx, hdrlen, max_pgs, softerr; #ifdef KERN_TLS int tls_enq_cnt; #endif @@ -733,7 +730,7 @@ softerr = 0; use_ext_pgs = false; - error = sendfile_getobj(td, fp, &obj, &vp, &shmfd, &obj_size, &bsize); + error = sendfile_getobj(td, fp, &obj, &vp, &shmfd, &obj_size); if (error != 0) goto out; diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c --- a/sys/vm/swap_pager.c +++ b/sys/vm/swap_pager.c @@ -1362,14 +1362,22 @@ swap_pager_getpages_locked(struct pctrie_iter *blks, vm_object_t object, vm_page_t *ma, int count, int *a_rbehind, int *a_rahead, struct buf *bp) { + vm_page_t m; vm_pindex_t pindex; - int rahead, rbehind; + int i, rahead, rbehind; VM_OBJECT_ASSERT_WLOCKED(object); KASSERT((object->flags & OBJ_SWAP) != 0, ("%s: object not swappable", __func__)); - pindex = ma[0]->pindex; + for (i = 0; i < count; i++) { + m = ma[i]; + if (m != bogus_page) { + pindex = m->pindex - i; + break; + } + } + MPASS(i != count); if (!swp_pager_haspage_iter(pindex, &rbehind, &rahead, blks)) { VM_OBJECT_WUNLOCK(object); uma_zfree(swrbuf_zone, bp); @@ -1396,8 +1404,14 @@ 
 vm_object_prepare_buf_pages(object, bp->b_pages, count, &rbehind, &rahead, ma); bp->b_npages = rbehind + count + rahead; - for (int i = 0; i < bp->b_npages; i++) - bp->b_pages[i]->oflags |= VPO_SWAPINPROG; + KASSERT(bp->b_npages <= PBUF_PAGES, + ("bp_npages %d (rb %d c %d ra %d) greater than PBUF_PAGES %ju", + bp->b_npages, rbehind, count, rahead, (uintmax_t)PBUF_PAGES)); + for (i = 0; i < bp->b_npages; i++) { + m = bp->b_pages[i]; + if (m != bogus_page) + m->oflags |= VPO_SWAPINPROG; + } bp->b_blkno = swp_pager_meta_lookup(blks, pindex - rbehind); KASSERT(bp->b_blkno != SWAPBLK_NONE, ("no swap blocking containing %p(%jx)", object, (uintmax_t)pindex)); @@ -1445,8 +1459,14 @@ */ VM_OBJECT_WLOCK(object); /* This could be implemented more efficiently with aflags */ - while ((ma[0]->oflags & VPO_SWAPINPROG) != 0) { - ma[0]->oflags |= VPO_SWAPSLEEP; + for (i = 0; i < count; i++) { + m = ma[i]; + if (m != bogus_page) + break; + } + MPASS(i != count); + while ((m->oflags & VPO_SWAPINPROG) != 0) { + m->oflags |= VPO_SWAPSLEEP; VM_CNT_INC(v_intrans); if (VM_OBJECT_SLEEP(object, &object->handle, PSWP, "swread", hz * 20)) { @@ -1460,9 +1480,10 @@ /* * If we had an unrecoverable read error pages will not be valid. */ - for (int i = 0; i < count; i++) - if (ma[i]->valid != VM_PAGE_BITS_ALL) + for (i = 0; i < count; i++) { + if (ma[i] != bogus_page && ma[i]->valid != VM_PAGE_BITS_ALL) return (VM_PAGER_ERROR); + } return (VM_PAGER_OK); @@ -1727,6 +1748,9 @@ for (i = 0; i < bp->b_npages; ++i) { vm_page_t m = bp->b_pages[i]; + if (m == bogus_page) + continue; + m->oflags &= ~VPO_SWAPINPROG; if (m->oflags & VPO_SWAPSLEEP) { m->oflags &= ~VPO_SWAPSLEEP;