Index: sys/net/if_vlan.c =================================================================== --- sys/net/if_vlan.c +++ sys/net/if_vlan.c @@ -57,7 +57,7 @@ #include #include #include -#include +#include #include #include @@ -115,6 +115,7 @@ uint16_t ifvm_proto; /* encapsulation ethertype */ uint16_t ifvm_tag; /* tag to apply on packets leaving if */ } ifv_mib; + struct task lladdr_task; SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead; #ifndef VLAN_ARRAY LIST_ENTRY(ifvlan) ifv_list; @@ -154,33 +155,58 @@ static eventhandler_tag iflladdr_tag; /* - * We have a global mutex, that is used to serialize configuration - * changes and isn't used in normal packet delivery. + * The global vlan rmlock(9) is used to synchronize configuration changes as + * well as the destruction/unlinking of ifvlan and ifvlantrunks. * + * The locking strategy is straightforward. The global lock protects the + * creation/destruction of ifvlans and ifvlantrunks, i.e. it protects + * references to them. Any reading of an ifvlantrunk reference (ifnet's + * if_vlantrunk field, ifvlan's ifv_trunk field) must be done under (at least) + * a shared lock on the global lock. Similarly any writing to these fields must + * be done under an exclusive lock on the global lock. This ensures that any + * readers are guaranteed to read NULL when the trunk or vlan disappears. Since + * the global vlan lock must be acquired before the usage of an ifvlantrunk, + * naturally every acquisition of a trunk's lock must be preceded by an + * acquisition of the global lock. + * + * Additionally we leverage an exclusive lock to ensure that ioctl changes + * that could lead to inconsistencies if done concurrently (e.g. SIOCSIFLLADDR) + * are synchronized. 
+ */ + +static struct rmlock ifv_lock; +#define VLAN_LOCK_INIT() rm_init(&ifv_lock, "vlan_global") +#define VLAN_LOCK_DESTROY() rm_destroy(&ifv_lock) +#define VLAN_RLOCK() rm_rlock(&ifv_lock, &vlan_tracker) +#define VLAN_WLOCK() rm_wlock(&ifv_lock) +#define VLAN_RUNLOCK() rm_runlock(&ifv_lock, &vlan_tracker) +#define VLAN_WUNLOCK() rm_wunlock(&ifv_lock) +#define VLAN_RLOCK_ASSERT() rm_assert(&ifv_lock, RA_RLOCKED) +#define VLAN_LOCK_ASSERT() rm_assert(&ifv_lock, RA_LOCKED) +#define VLAN_WLOCK_ASSERT() rm_assert(&ifv_lock, RA_WLOCKED) +#define VLAN_LOCK_READER struct rm_priotracker vlan_tracker + +/* * We also have a per-trunk rmlock(9), that is locked shared on packet * processing and exclusive when configuration is changed. - * - * The VLAN_ARRAY substitutes the dynamic hash with a static array - * with 4096 entries. In theory this can give a boost in processing, - * however on practice it does not. Probably this is because array - * is too big to fit into CPU cache. */ -static struct sx ifv_lock; -#define VLAN_LOCK_INIT() sx_init(&ifv_lock, "vlan_global") -#define VLAN_LOCK_DESTROY() sx_destroy(&ifv_lock) -#define VLAN_LOCK_ASSERT() sx_assert(&ifv_lock, SA_LOCKED) -#define VLAN_LOCK() sx_xlock(&ifv_lock) -#define VLAN_UNLOCK() sx_xunlock(&ifv_lock) #define TRUNK_LOCK_INIT(trunk) rm_init(&(trunk)->lock, vlanname) #define TRUNK_LOCK_DESTROY(trunk) rm_destroy(&(trunk)->lock) -#define TRUNK_LOCK(trunk) rm_wlock(&(trunk)->lock) -#define TRUNK_UNLOCK(trunk) rm_wunlock(&(trunk)->lock) -#define TRUNK_LOCK_ASSERT(trunk) rm_assert(&(trunk)->lock, RA_WLOCKED) -#define TRUNK_RLOCK(trunk) rm_rlock(&(trunk)->lock, &tracker) -#define TRUNK_RUNLOCK(trunk) rm_runlock(&(trunk)->lock, &tracker) -#define TRUNK_LOCK_RASSERT(trunk) rm_assert(&(trunk)->lock, RA_RLOCKED) -#define TRUNK_LOCK_READER struct rm_priotracker tracker +#define TRUNK_RLOCK(trunk) rm_rlock(&(trunk)->lock, &trunk_tracker) +#define TRUNK_WLOCK(trunk) rm_wlock(&(trunk)->lock) +#define TRUNK_RUNLOCK(trunk) 
rm_runlock(&(trunk)->lock, &trunk_tracker) +#define TRUNK_WUNLOCK(trunk) rm_wunlock(&(trunk)->lock) +#define TRUNK_RLOCK_ASSERT(trunk) rm_assert(&(trunk)->lock, RA_RLOCKED) +#define TRUNK_LOCK_ASSERT(trunk) rm_assert(&(trunk)->lock, RA_LOCKED) +#define TRUNK_WLOCK_ASSERT(trunk) rm_assert(&(trunk)->lock, RA_WLOCKED) +#define TRUNK_LOCK_READER struct rm_priotracker trunk_tracker +/* + * The VLAN_ARRAY substitutes the dynamic hash with a static array + * with 4096 entries. In theory this can give a boost in processing, + * however in practice it does not. Probably this is because the array + * is too big to fit into CPU cache. + */ #ifndef VLAN_ARRAY static void vlan_inithash(struct ifvlantrunk *trunk); static void vlan_freehash(struct ifvlantrunk *trunk); @@ -216,6 +242,8 @@ static void vlan_ifdetach(void *arg, struct ifnet *ifp); static void vlan_iflladdr(void *arg, struct ifnet *ifp); +static void vlan_lladdr_fn(void *arg, int pending); + static struct if_clone *vlan_cloner; #ifdef VIMAGE @@ -270,7 +298,7 @@ int i, b; struct ifvlan *ifv2; - TRUNK_LOCK_ASSERT(trunk); + TRUNK_WLOCK_ASSERT(trunk); KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); b = 1 << trunk->hwidth; @@ -300,7 +328,7 @@ int i, b; struct ifvlan *ifv2; - TRUNK_LOCK_ASSERT(trunk); + TRUNK_WLOCK_ASSERT(trunk); KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); b = 1 << trunk->hwidth; @@ -328,7 +356,7 @@ struct ifvlanhead *hash2; int hwidth2, i, j, n, n2; - TRUNK_LOCK_ASSERT(trunk); + TRUNK_WLOCK_ASSERT(trunk); KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); if (howmuch == 0) { @@ -374,7 +402,7 @@ { struct ifvlan *ifv; - TRUNK_LOCK_RASSERT(trunk); + TRUNK_RLOCK_ASSERT(trunk); LIST_FOREACH(ifv, &trunk->hash[HASH(vid, trunk->hmask)], ifv_list) if (ifv->ifv_vid == vid) @@ -444,12 +472,12 @@ static void trunk_destroy(struct ifvlantrunk *trunk) { - VLAN_LOCK_ASSERT(); + VLAN_WLOCK_ASSERT(); - TRUNK_LOCK(trunk); + TRUNK_WLOCK(trunk); vlan_freehash(trunk); 
trunk->parent->if_vlantrunk = NULL; - TRUNK_UNLOCK(trunk); + TRUNK_WUNLOCK(trunk); TRUNK_LOCK_DESTROY(trunk); free(trunk, M_VLAN); } @@ -471,9 +499,13 @@ struct vlan_mc_entry *mc; int error; + VLAN_WLOCK_ASSERT(); + /* Find the parent. */ sc = ifp->if_softc; - TRUNK_LOCK_ASSERT(TRUNK(sc)); + if (TRUNK(sc) == NULL) + return (EINVAL); + TRUNK_WLOCK_ASSERT(TRUNK(sc)); ifp_p = PARENT(sc); CURVNET_SET_QUIET(ifp_p->if_vnet); @@ -520,36 +552,50 @@ vlan_iflladdr(void *arg __unused, struct ifnet *ifp) { struct ifvlan *ifv; + struct ifnet *ifv_ifp; + struct ifvlantrunk *trunk; #ifndef VLAN_ARRAY struct ifvlan *next; #endif int i; + struct sockaddr_dl *sdl; /* - * Check if it's a trunk interface first of all - * to avoid needless locking. + * We need an exclusive lock here to prevent concurrent SIOCSIFLLADDR + * ioctl calls on the parent garbling the lladdr of the child vlan. */ - if (ifp->if_vlantrunk == NULL) + VLAN_WLOCK(); + trunk = ifp->if_vlantrunk; + if (trunk == NULL) { + VLAN_WUNLOCK(); return; + } - VLAN_LOCK(); /* * OK, it's a trunk. Loop over and change all vlan's lladdrs on it. */ #ifdef VLAN_ARRAY for (i = 0; i < VLAN_ARRAY_SIZE; i++) - if ((ifv = ifp->if_vlantrunk->vlans[i])) { + if ((ifv = trunk->vlans[i])) { #else /* VLAN_ARRAY */ - for (i = 0; i < (1 << ifp->if_vlantrunk->hwidth); i++) - LIST_FOREACH_SAFE(ifv, &ifp->if_vlantrunk->hash[i], ifv_list, next) { + for (i = 0; i < (1 << trunk->hwidth); i++) + LIST_FOREACH_SAFE(ifv, &trunk->hash[i], ifv_list, next) { #endif /* VLAN_ARRAY */ - VLAN_UNLOCK(); - if_setlladdr(ifv->ifv_ifp, IF_LLADDR(ifp), + /* + * Copy the new lladdr into the ifv_ifp and enqueue a task + * to actually call if_setlladdr. if_setlladdr needs to + * be deferred to a taskqueue because it will call into + * the if_vlan ioctl path and try to acquire the global + * lock. 
+ */ + ifv_ifp = ifv->ifv_ifp; + bcopy(IF_LLADDR(ifp), IF_LLADDR(ifv_ifp), ifp->if_addrlen); - VLAN_LOCK(); + sdl = (struct sockaddr_dl *)ifv_ifp->if_addr->ifa_addr; + sdl->sdl_alen = ifp->if_addrlen; + taskqueue_enqueue(taskqueue_swi, &ifv->lladdr_task); } - VLAN_UNLOCK(); - + VLAN_WUNLOCK(); } /* @@ -563,20 +609,18 @@ vlan_ifdetach(void *arg __unused, struct ifnet *ifp) { struct ifvlan *ifv; + struct ifvlantrunk *trunk; int i; - /* - * Check if it's a trunk interface first of all - * to avoid needless locking. - */ - if (ifp->if_vlantrunk == NULL) - return; - - /* If the ifnet is just being renamed, don't do anything. */ if (ifp->if_flags & IFF_RENAMING) return; + VLAN_WLOCK(); + trunk = ifp->if_vlantrunk; + if (trunk == NULL) { + VLAN_WUNLOCK(); + return; + } - VLAN_LOCK(); /* * OK, it's a trunk. Loop over and detach all vlan's on it. * Check trunk pointer after each vlan_unconfig() as it will @@ -584,25 +628,26 @@ */ #ifdef VLAN_ARRAY for (i = 0; i < VLAN_ARRAY_SIZE; i++) - if ((ifv = ifp->if_vlantrunk->vlans[i])) { + if ((ifv = trunk->vlans[i])) { vlan_unconfig_locked(ifv->ifv_ifp, 1); - if (ifp->if_vlantrunk == NULL) + if ((trunk = ifp->if_vlantrunk) == NULL) break; } #else /* VLAN_ARRAY */ restart: - for (i = 0; i < (1 << ifp->if_vlantrunk->hwidth); i++) - if ((ifv = LIST_FIRST(&ifp->if_vlantrunk->hash[i]))) { + for (i = 0; i < (1 << trunk->hwidth); i++) + if ((ifv = LIST_FIRST(&trunk->hash[i]))) { vlan_unconfig_locked(ifv->ifv_ifp, 1); - if (ifp->if_vlantrunk) + if ((trunk = ifp->if_vlantrunk) != NULL) goto restart; /* trunk->hwidth can change */ else break; } #endif /* VLAN_ARRAY */ /* Trunk should have been destroyed in vlan_unconfig(). 
*/ - KASSERT(ifp->if_vlantrunk == NULL, ("%s: purge failed", __func__)); - VLAN_UNLOCK(); + KASSERT(trunk == NULL && ifp->if_vlantrunk == NULL, + ("%s: purge failed", __func__)); + VLAN_WUNLOCK(); } /* @@ -612,15 +657,16 @@ vlan_trunkdev(struct ifnet *ifp) { struct ifvlan *ifv; + VLAN_LOCK_READER; if (ifp->if_type != IFT_L2VLAN) return (NULL); ifv = ifp->if_softc; ifp = NULL; - VLAN_LOCK(); + VLAN_RLOCK(); if (ifv->ifv_trunk) ifp = PARENT(ifv); - VLAN_UNLOCK(); + VLAN_RUNLOCK(); return (ifp); } @@ -682,17 +728,22 @@ { struct ifvlantrunk *trunk; struct ifvlan *ifv; + VLAN_LOCK_READER; TRUNK_LOCK_READER; + VLAN_RLOCK(); trunk = ifp->if_vlantrunk; - if (trunk == NULL) + if (trunk == NULL) { + VLAN_RUNLOCK(); return (NULL); + } ifp = NULL; TRUNK_RLOCK(trunk); ifv = vlan_gethash(trunk, vid); if (ifv) ifp = ifv->ifv_ifp; TRUNK_RUNLOCK(trunk); + VLAN_RUNLOCK(); return (ifp); } @@ -970,9 +1021,6 @@ return (error); } - - /* Update flags on the parent, if necessary. */ - vlan_setflags(ifp, 1); } return (0); @@ -986,6 +1034,12 @@ ether_ifdetach(ifp); /* first, remove it from system-wide lists */ vlan_unconfig(ifp); /* now it can be unconfigured and freed */ + /* + * We should have the only reference to the ifv now, so we can now + * drain any remaining lladdr task before freeing the ifnet and the + * ifvlan. + */ + taskqueue_drain(taskqueue_swi, &ifv->lladdr_task); if_free(ifp); free(ifv, M_VLAN); ifc_free_unit(ifc, unit); @@ -1010,8 +1064,16 @@ struct ifvlan *ifv; struct ifnet *p; int error, len, mcast; + VLAN_LOCK_READER; ifv = ifp->if_softc; + VLAN_RLOCK(); + if (TRUNK(ifv) == NULL) { + if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); + VLAN_RUNLOCK(); + m_freem(m); + return (ENETDOWN); + } p = PARENT(ifv); len = m->m_pkthdr.len; mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0; @@ -1023,8 +1085,9 @@ * or parent's driver will cause a system crash. 
*/ if (!UP_AND_RUNNING(p)) { - m_freem(m); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); + VLAN_RUNLOCK(); + m_freem(m); return (ENETDOWN); } @@ -1052,6 +1115,7 @@ if (n > 0) { if_printf(ifp, "cannot pad short frame\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); + VLAN_RUNLOCK(); m_freem(m); return (0); } @@ -1072,6 +1136,7 @@ if (m == NULL) { if_printf(ifp, "unable to prepend VLAN header\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); + VLAN_RUNLOCK(); return (0); } } @@ -1086,6 +1151,7 @@ if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast); } else if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); + VLAN_RUNLOCK(); return (error); } @@ -1100,12 +1166,19 @@ static void vlan_input(struct ifnet *ifp, struct mbuf *m) { - struct ifvlantrunk *trunk = ifp->if_vlantrunk; + struct ifvlantrunk *trunk; struct ifvlan *ifv; + VLAN_LOCK_READER; TRUNK_LOCK_READER; uint16_t vid; - KASSERT(trunk != NULL, ("%s: no trunk", __func__)); + VLAN_RLOCK(); + trunk = ifp->if_vlantrunk; + if (trunk == NULL) { + VLAN_RUNLOCK(); + m_freem(m); + return; + } if (m->m_flags & M_VLANTAG) { /* @@ -1125,6 +1198,7 @@ if (m->m_len < sizeof(*evl) && (m = m_pullup(m, sizeof(*evl))) == NULL) { if_printf(ifp, "cannot pullup VLAN header\n"); + VLAN_RUNLOCK(); return; } evl = mtod(m, struct ether_vlan_header *); @@ -1146,8 +1220,9 @@ panic("%s: %s has unsupported if_type %u", __func__, ifp->if_xname, ifp->if_type); #endif - m_freem(m); if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1); + VLAN_RUNLOCK(); + m_freem(m); return; } } @@ -1156,19 +1231,33 @@ ifv = vlan_gethash(trunk, vid); if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) { TRUNK_RUNLOCK(trunk); - m_freem(m); if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1); + VLAN_RUNLOCK(); + m_freem(m); return; } TRUNK_RUNLOCK(trunk); m->m_pkthdr.rcvif = ifv->ifv_ifp; if_inc_counter(ifv->ifv_ifp, IFCOUNTER_IPACKETS, 1); + VLAN_RUNLOCK(); /* Pass it back through the parent's input routine. 
*/ (*ifp->if_input)(ifv->ifv_ifp, m); } +static void +vlan_lladdr_fn(void *arg, int pending __unused) +{ + struct ifvlan *ifv; + struct ifnet *ifp; + + ifv = (struct ifvlan *)arg; + ifp = ifv->ifv_ifp; + /* The ifv_ifp already has the lladdr copied in. */ + if_setlladdr(ifp, IF_LLADDR(ifp), ifp->if_addrlen); +} + static int vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t vid) { @@ -1195,26 +1284,27 @@ if (ifv->ifv_trunk) return (EBUSY); + VLAN_WLOCK(); if (p->if_vlantrunk == NULL) { + VLAN_WUNLOCK(); trunk = malloc(sizeof(struct ifvlantrunk), M_VLAN, M_WAITOK | M_ZERO); vlan_inithash(trunk); - VLAN_LOCK(); + VLAN_WLOCK(); if (p->if_vlantrunk != NULL) { - /* A race that that is very unlikely to be hit. */ + /* Someone else already allocated the trunk. */ vlan_freehash(trunk); free(trunk, M_VLAN); goto exists; } TRUNK_LOCK_INIT(trunk); - TRUNK_LOCK(trunk); + TRUNK_WLOCK(trunk); p->if_vlantrunk = trunk; trunk->parent = p; } else { - VLAN_LOCK(); exists: trunk = p->if_vlantrunk; - TRUNK_LOCK(trunk); + TRUNK_WLOCK(trunk); } ifv->ifv_vid = vid; /* must set this before vlan_inshash() */ @@ -1288,15 +1378,19 @@ * Configure multicast addresses that may already be * joined on the vlan device. */ - (void)vlan_setmulti(ifp); /* XXX: VLAN lock held */ + (void)vlan_setmulti(ifp); + + TASK_INIT(&ifv->lladdr_task, 0, vlan_lladdr_fn, ifv); /* We are ready for operation now. */ ifp->if_drv_flags |= IFF_DRV_RUNNING; + /* Update flags on the parent, if necessary. 
*/ + vlan_setflags(ifp, 1); done: - TRUNK_UNLOCK(trunk); + TRUNK_WUNLOCK(trunk); if (error == 0) EVENTHANDLER_INVOKE(vlan_config, p, ifv->ifv_vid); - VLAN_UNLOCK(); + VLAN_WUNLOCK(); return (error); } @@ -1305,9 +1399,9 @@ vlan_unconfig(struct ifnet *ifp) { - VLAN_LOCK(); + VLAN_WLOCK(); vlan_unconfig_locked(ifp, 0); - VLAN_UNLOCK(); + VLAN_WUNLOCK(); } static void @@ -1319,7 +1413,7 @@ struct ifnet *parent; int error; - VLAN_LOCK_ASSERT(); + VLAN_WLOCK_ASSERT(); ifv = ifp->if_softc; trunk = ifv->ifv_trunk; @@ -1327,7 +1421,7 @@ if (trunk != NULL) { - TRUNK_LOCK(trunk); + TRUNK_WLOCK(trunk); parent = trunk->parent; /* @@ -1365,17 +1459,10 @@ */ if (trunk->refcnt == 0) { parent->if_vlantrunk = NULL; - /* - * XXXGL: If some ithread has already entered - * vlan_input() and is now blocked on the trunk - * lock, then it should preempt us right after - * unlock and finish its work. Then we will acquire - * lock again in trunk_destroy(). - */ - TRUNK_UNLOCK(trunk); + TRUNK_WUNLOCK(trunk); trunk_destroy(trunk); } else - TRUNK_UNLOCK(trunk); + TRUNK_WUNLOCK(trunk); } /* Disconnect from parent. */ @@ -1402,7 +1489,7 @@ struct ifvlan *ifv; int error; - /* XXX VLAN_LOCK_ASSERT(); */ + VLAN_LOCK_ASSERT(); ifv = ifp->if_softc; status = status ? (ifp->if_flags & flag) : 0; @@ -1417,6 +1504,8 @@ * That's why we can be sure that recorded flags still are * in accord with actual parent's flags. 
*/ + if (TRUNK(ifv) == NULL) + return (EINVAL); if (status != (ifv->ifv_pflags & flag)) { error = (*func)(PARENT(ifv), status); if (error) @@ -1450,11 +1539,19 @@ static void vlan_link_state(struct ifnet *ifp) { - struct ifvlantrunk *trunk = ifp->if_vlantrunk; + struct ifvlantrunk *trunk; struct ifvlan *ifv; int i; + VLAN_LOCK_READER; - TRUNK_LOCK(trunk); + VLAN_RLOCK(); + trunk = ifp->if_vlantrunk; + if (trunk == NULL) { + VLAN_RUNLOCK(); + return; + } + + TRUNK_WLOCK(trunk); #ifdef VLAN_ARRAY for (i = 0; i < VLAN_ARRAY_SIZE; i++) if (trunk->vlans[i] != NULL) { @@ -1467,17 +1564,23 @@ if_link_state_change(ifv->ifv_ifp, trunk->parent->if_link_state); } - TRUNK_UNLOCK(trunk); + TRUNK_WUNLOCK(trunk); + VLAN_RUNLOCK(); } static void vlan_capabilities(struct ifvlan *ifv) { - struct ifnet *p = PARENT(ifv); - struct ifnet *ifp = ifv->ifv_ifp; + struct ifnet *p; + struct ifnet *ifp; struct ifnet_hw_tsomax hw_tsomax; - TRUNK_LOCK_ASSERT(TRUNK(ifv)); + VLAN_LOCK_ASSERT(); + if (TRUNK(ifv) == NULL) + return; + TRUNK_WLOCK_ASSERT(TRUNK(ifv)); + p = PARENT(ifv); + ifp = ifv->ifv_ifp; /* * If the parent interface can do checksum offloading @@ -1535,11 +1638,18 @@ static void vlan_trunk_capabilities(struct ifnet *ifp) { - struct ifvlantrunk *trunk = ifp->if_vlantrunk; + struct ifvlantrunk *trunk; struct ifvlan *ifv; int i; + VLAN_LOCK_READER; - TRUNK_LOCK(trunk); + VLAN_RLOCK(); + trunk = ifp->if_vlantrunk; + if (trunk == NULL) { + VLAN_RUNLOCK(); + return; + } + TRUNK_WLOCK(trunk); #ifdef VLAN_ARRAY for (i = 0; i < VLAN_ARRAY_SIZE; i++) if (trunk->vlans[i] != NULL) { @@ -1550,7 +1660,8 @@ #endif vlan_capabilities(ifv); } - TRUNK_UNLOCK(trunk); + TRUNK_WUNLOCK(trunk); + VLAN_RUNLOCK(); } static int @@ -1563,6 +1674,7 @@ struct ifvlantrunk *trunk; struct vlanreq vlr; int error = 0; + VLAN_LOCK_READER; ifr = (struct ifreq *)data; ifa = (struct ifaddr *) data; @@ -1585,10 +1697,10 @@ } break; case SIOCGIFMEDIA: - VLAN_LOCK(); + VLAN_RLOCK(); if (TRUNK(ifv) != NULL) { p = 
PARENT(ifv); - VLAN_UNLOCK(); + VLAN_RUNLOCK(); error = (*p->if_ioctl)(p, SIOCGIFMEDIA, data); /* Limit the result to the parent's current config. */ if (error == 0) { @@ -1603,7 +1715,7 @@ } } } else { - VLAN_UNLOCK(); + VLAN_RUNLOCK(); error = EINVAL; } break; @@ -1616,7 +1728,7 @@ /* * Set the interface MTU. */ - VLAN_LOCK(); + VLAN_RLOCK(); if (TRUNK(ifv) != NULL) { if (ifr->ifr_mtu > (PARENT(ifv)->if_mtu - ifv->ifv_mtufudge) || @@ -1627,7 +1739,7 @@ ifp->if_mtu = ifr->ifr_mtu; } else error = EINVAL; - VLAN_UNLOCK(); + VLAN_RUNLOCK(); break; case SIOCSETVLAN: @@ -1657,11 +1769,6 @@ break; } error = vlan_config(ifv, p, vlr.vlr_tag); - if (error) - break; - - /* Update flags on the parent, if necessary. */ - vlan_setflags(ifp, 1); break; case SIOCGETVLAN: @@ -1672,13 +1779,13 @@ } #endif bzero(&vlr, sizeof(vlr)); - VLAN_LOCK(); + VLAN_RLOCK(); if (TRUNK(ifv) != NULL) { strlcpy(vlr.vlr_parent, PARENT(ifv)->if_xname, sizeof(vlr.vlr_parent)); vlr.vlr_tag = ifv->ifv_vid; } - VLAN_UNLOCK(); + VLAN_RUNLOCK(); error = copyout(&vlr, ifr->ifr_data, sizeof(vlr)); break; @@ -1687,8 +1794,10 @@ * We should propagate selected flags to the parent, * e.g., promiscuous mode. */ + VLAN_RLOCK(); if (TRUNK(ifv) != NULL) error = vlan_setflags(ifp, 1); + VLAN_RUNLOCK(); break; case SIOCADDMULTI: @@ -1697,12 +1806,14 @@ * If we don't have a parent, just remember the membership for * when we do. */ + VLAN_WLOCK(); trunk = TRUNK(ifv); if (trunk != NULL) { - TRUNK_LOCK(trunk); + TRUNK_WLOCK(trunk); error = vlan_setmulti(ifp); - TRUNK_UNLOCK(trunk); + TRUNK_WUNLOCK(trunk); } + VLAN_WUNLOCK(); break; default: