diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c --- a/sys/dev/md/md.c +++ b/sys/dev/md/md.c @@ -1600,6 +1600,7 @@ { int error, res; vm_pindex_t oldpages, newpages; + vm_ooffset_t change; switch (sc->type) { case MD_VNODE: @@ -1612,17 +1613,18 @@ oldpages = OFF_TO_IDX(sc->mediasize); newpages = OFF_TO_IDX(mdr->md_mediasize); if (newpages < oldpages) { + change = IDX_TO_OFF(oldpages - newpages); VM_OBJECT_WLOCK(sc->s_swap.object); vm_object_page_remove(sc->s_swap.object, newpages, 0, 0); - swap_release_by_cred(IDX_TO_OFF(oldpages - - newpages), sc->cred); - sc->s_swap.object->charge = IDX_TO_OFF(newpages); + swap_release_by_cred(change, sc->cred); + vm_object_adjust_charge(sc->s_swap.object, change, + false); sc->s_swap.object->size = newpages; VM_OBJECT_WUNLOCK(sc->s_swap.object); } else if (newpages > oldpages) { - res = swap_reserve_by_cred(IDX_TO_OFF(newpages - - oldpages), sc->cred); + change = IDX_TO_OFF(newpages - oldpages); + res = swap_reserve_by_cred(change, sc->cred); if (!res) return (ENOMEM); if ((mdr->md_options & MD_RESERVE) || @@ -1637,8 +1639,9 @@ } } VM_OBJECT_WLOCK(sc->s_swap.object); - sc->s_swap.object->charge = IDX_TO_OFF(newpages); sc->s_swap.object->size = newpages; + vm_object_adjust_charge(sc->s_swap.object, change, + true); VM_OBJECT_WUNLOCK(sc->s_swap.object); } break; diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c --- a/sys/kern/uipc_shm.c +++ b/sys/kern/uipc_shm.c @@ -759,7 +759,7 @@ /* Free the swap accounted for shm */ swap_release_by_cred(delta, object->cred); - object->charge -= delta; + vm_object_adjust_charge(object, delta, false); } else { if ((shmfd->shm_seals & F_SEAL_GROW) != 0) return (EPERM); @@ -768,7 +768,7 @@ delta = IDX_TO_OFF(nobjsize - object->size); if (!swap_reserve_by_cred(delta, object->cred)) return (ENOMEM); - object->charge += delta; + vm_object_adjust_charge(object, delta, true); } shmfd->shm_size = length; mtx_lock(&shm_timestamp_lock); diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c --- a/sys/vm/vm_map.c 
+++ b/sys/vm/vm_map.c @@ -2457,7 +2457,8 @@ KASSERT(entry->object.vm_object->cred == NULL, ("OVERCOMMIT: %s: both cred e %p", __func__, entry)); entry->object.vm_object->cred = entry->cred; - entry->object.vm_object->charge = entry->end - entry->start; + vm_object_adjust_charge(entry->object.vm_object, + entry->end - entry->start, true); VM_OBJECT_WUNLOCK(entry->object.vm_object); entry->cred = NULL; } @@ -2968,7 +2969,7 @@ crhold(cred); obj->cred = cred; - obj->charge = ptoa(obj->size); + vm_object_adjust_charge(obj, ptoa(obj->size), true); VM_OBJECT_WUNLOCK(obj); } @@ -3990,15 +3991,14 @@ if (offidxend >= object->size && offidxstart < object->size) { size1 = object->size; - object->size = offidxstart; if (object->cred != NULL) { - size1 -= object->size; - KASSERT(object->charge >= ptoa(size1), - ("object %p charge < 0", object)); + size1 -= offidxstart; swap_release_by_cred(ptoa(size1), object->cred); - object->charge -= ptoa(size1); + vm_object_adjust_charge(object, + ptoa(size1), false); } + object->size = offidxstart; } } VM_OBJECT_WUNLOCK(object); @@ -4195,10 +4195,13 @@ if (src_entry->cred != NULL && !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { KASSERT(src_object->cred == NULL, - ("OVERCOMMIT: vm_map_copy_anon_entry: cred %p", - src_object)); + ("OVERCOMMIT: vm_map_copy_anon_entry: obj %p cred %p", + src_object, src_object->cred)); + KASSERT(src_object->charge == 0, + ("OVERCOMMIT: vm_map_copy_anon_entry: obj %p charge %#jx", + src_object, (uintmax_t)src_object->charge)); src_object->cred = src_entry->cred; - src_object->charge = size; + vm_object_adjust_charge(src_object, size, true); } dst_entry->object.vm_object = src_object; if (charged) { @@ -4454,9 +4457,13 @@ if (old_entry->cred != NULL) { KASSERT(object->cred == NULL, ("vmspace_fork both cred")); + KASSERT(object->charge == 0, + ("vmspace_fork obj %p charge %#jx", + object, (uintmax_t)object->charge)); object->cred = old_entry->cred; - object->charge = old_entry->end - - old_entry->start; + 
vm_object_adjust_charge(object, + old_entry->end - old_entry->start, + true); old_entry->cred = NULL; } diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h --- a/sys/vm/vm_object.h +++ b/sys/vm/vm_object.h @@ -357,6 +357,7 @@ void umtx_shm_object_terminated(vm_object_t object); extern int umtx_shm_vnobj_persistent; +void vm_object_adjust_charge(vm_object_t obj, vm_ooffset_t change, bool inc); vm_object_t vm_object_allocate (objtype_t, vm_pindex_t); vm_object_t vm_object_allocate_anon(vm_pindex_t, vm_object_t, struct ucred *, vm_size_t); diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c --- a/sys/vm/vm_object.c +++ b/sys/vm/vm_object.c @@ -113,6 +113,8 @@ static boolean_t vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *allclean); static void vm_object_backing_remove(vm_object_t object); +static void vm_object_set_charge_unlocked(vm_object_t obj, + vm_ooffset_t charge); /* * Virtual memory objects maintain the actual data @@ -466,7 +468,7 @@ _vm_object_allocate(OBJT_SWAP, size, OBJ_ANON | OBJ_ONEMAPPING | OBJ_SWAP, object, handle); object->cred = cred; - object->charge = cred != NULL ? charge : 0; + vm_object_set_charge_unlocked(object, cred != NULL ? charge : 0); return (object); } @@ -1552,9 +1554,7 @@ } if (orig_object->cred != NULL) { crhold(orig_object->cred); - KASSERT(orig_object->charge >= ptoa(size), - ("orig_object->charge < 0")); - orig_object->charge -= ptoa(size); + vm_object_adjust_charge(orig_object, ptoa(size), false); } /* @@ -2163,7 +2163,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, vm_size_t prev_size, vm_size_t next_size, int cflags) { - vm_pindex_t next_end, next_pindex; + vm_pindex_t next_end, next_pindex, old_obj_size; if (prev_object == NULL) return (TRUE); @@ -2199,6 +2199,16 @@ next_end = next_pindex + next_size; + /* + * Extend the object if necessary. Do it before calling + * vm_object_adjust_charge(), roll back if swap reservation + * failed. 
+ */ + old_obj_size = prev_object->size; + if (next_end > old_obj_size) + prev_object->size = next_end; + + /* * Account for the charge. */ @@ -2213,16 +2223,17 @@ * entry, and swap reservation for this entry is * managed in appropriate time. */ - if (next_end > prev_object->size) { - vm_size_t charge = ptoa(next_end - prev_object->size); + if (next_end > old_obj_size) { + vm_size_t charge = ptoa(next_end - old_obj_size); if ((cflags & OBJCO_CHARGED) == 0) { if (!swap_reserve_by_cred(charge, prev_object->cred)) { + prev_object->size = old_obj_size; VM_OBJECT_WUNLOCK(prev_object); return (FALSE); } - } else if (prev_object->size > next_pindex) { + } else if (old_obj_size > next_pindex) { /* * The caller charged, but: * - the object has already accounted for the @@ -2230,10 +2241,10 @@ * - and the object end is between previous * mapping end and next_end. */ - swap_release_by_cred(ptoa(prev_object->size - + swap_release_by_cred(ptoa(old_obj_size - next_pindex), prev_object->cred); } - prev_object->charge += charge; + vm_object_adjust_charge(prev_object, charge, true); } else if ((cflags & OBJCO_CHARGED) != 0) { /* * The caller charged, but the object has @@ -2249,15 +2260,9 @@ * Remove any pages that may still be in the object from a previous * deallocation. */ - if (next_pindex < prev_object->size) + if (next_pindex < old_obj_size) vm_object_page_remove(prev_object, next_pindex, next_end, 0); - /* - * Extend the object if necessary. 
- */ - if (next_end > prev_object->size) - prev_object->size = next_end; - #ifdef INVARIANTS /* * Re-check: there must be no pages in the next range backed @@ -2485,6 +2490,58 @@ (void)blockcount_sleep(&obj->busy, NULL, wmesg, PVM); } +void +vm_object_adjust_charge(vm_object_t obj, vm_ooffset_t change, bool inc) +{ + VM_OBJECT_ASSERT_WLOCKED(obj); + KASSERT((obj->flags & OBJ_SWAP) != 0, + ("vm_object_adjust_charge: obj %p not swap", obj)); + KASSERT((change & PAGE_MASK) == 0, + ("vm_object_adjust_charge: change %#jx is in pages", + (uintmax_t)change)); + + if (inc) { + KASSERT(obj->charge + change >= obj->charge, + ("vm_object_adjust_charge: obj %p charge overflow " + "charge %#jx change %#jx sum %#jx", + obj, (uintmax_t)obj->charge, (uintmax_t)change, + (uintmax_t)obj->charge + change)); + KASSERT(obj->charge + change <= ptoa(obj->size), + ("vm_object_adjust_charge: obj %p charge too large " + "charge %#jx change %#jx sum %#jx size %#jx", + obj, (uintmax_t)obj->charge, (uintmax_t)change, + (uintmax_t)obj->charge + change, + (uintmax_t)ptoa(obj->size))); + obj->charge += change; + } else { + KASSERT(obj->charge >= change, + ("vm_object_adjust_charge: obj %p charge underflow " + "charge %#jx change %#jx sum %#jx", + obj, (uintmax_t)obj->charge, (uintmax_t)change, + (uintmax_t)obj->charge + change)); + obj->charge -= change; + } +} + +static void +vm_object_set_charge_unlocked(vm_object_t obj, vm_ooffset_t charge) +{ + KASSERT((obj->flags & OBJ_SWAP) != 0, + ("vm_object_set_charge: obj %p not swap", obj)); + KASSERT((charge & PAGE_MASK) == 0, + ("vm_object_set_charge: charge %#jx is in pages", + (uintmax_t)charge)); + KASSERT(charge <= ptoa(obj->size), + ("vm_object_set_charge: obj %p charge too large " + "charge %#jx size %#jx", + obj, (uintmax_t)charge, (uintmax_t)ptoa(obj->size))); + KASSERT(obj->charge == 0, + ("vm_object_set_charge: obj %p charge non-zero %#jx", + obj, (uintmax_t)obj->charge)); + + obj->charge = charge; +} + /* * This function aims to determine if the 
object is mapped, * specifically, if it is referenced by a vm_map_entry. Because