| author    | Ulrich Drepper <drepper@redhat.com>      | 1999-06-13 13:36:34 +0000 |
|-----------|------------------------------------------|---------------------------|
| committer | Ulrich Drepper <drepper@redhat.com>      | 1999-06-13 13:36:34 +0000 |
| commit    | ec239360d13518a13f572b635d036c7d10028010 | (patch)                   |
| tree      | bdb5111363f45d2107849c2456b575d72779174c | /db2/lock                 |
| parent    | fc3703521650a9b6db910a50c4fc0f410496e134 | (diff)                    |
Update.
* db2/Makefile (distribute): Remove files which do not exist
anymore.
Diffstat (limited to 'db2/lock')
| -rw-r--r-- | db2/lock/lock.c          | 386 |
| -rw-r--r-- | db2/lock/lock_conflict.c |   8 |
| -rw-r--r-- | db2/lock/lock_deadlock.c |  76 |
| -rw-r--r-- | db2/lock/lock_region.c   |  59 |
| -rw-r--r-- | db2/lock/lock_util.c     |   8 |
5 files changed, 404 insertions, 133 deletions
diff --git a/db2/lock/lock.c b/db2/lock/lock.c
index 3d20e0d65b..4cf1d9ecca 100644
--- a/db2/lock/lock.c
+++ b/db2/lock/lock.c
@@ -8,7 +8,7 @@
 #include "config.h"
 
 #ifndef lint
-static const char sccsid[] = "@(#)lock.c    10.52 (Sleepycat) 5/10/98";
+static const char sccsid[] = "@(#)lock.c    10.61 (Sleepycat) 1/3/99";
 #endif /* not lint */
 
 #ifndef NO_SYSTEM_INCLUDES
@@ -23,16 +23,22 @@ static const char sccsid[] = "@(#)lock.c    10.52 (Sleepycat) 5/10/98";
 #include "db_page.h"
 #include "db_shash.h"
 #include "lock.h"
-#include "common_ext.h"
 #include "db_am.h"
+#include "txn_auto.h"
+#include "txn_ext.h"
+#include "common_ext.h"
 
 static void __lock_checklocker __P((DB_LOCKTAB *, struct __db_lock *, int));
 static void __lock_freeobj __P((DB_LOCKTAB *, DB_LOCKOBJ *));
-static int  __lock_get_internal __P((DB_LOCKTAB *, u_int32_t, u_int32_t,
-    const DBT *, db_lockmode_t, struct __db_lock **));
+static int  __lock_get_internal __P((DB_LOCKTAB *, u_int32_t, DB_TXN *,
+    u_int32_t, const DBT *, db_lockmode_t, struct __db_lock **));
+static int  __lock_is_parent __P((u_int32_t, DB_TXN *));
+static int  __lock_promote __P((DB_LOCKTAB *, DB_LOCKOBJ *));
 static int  __lock_put_internal __P((DB_LOCKTAB *, struct __db_lock *, int));
 static void __lock_remove_waiter __P((DB_LOCKTAB *,
     DB_LOCKOBJ *, struct __db_lock *, db_status_t));
+static int  __lock_vec_internal __P((DB_LOCKTAB *, u_int32_t, DB_TXN *,
+    u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **elistp));
 
 int
 lock_id(lt, idp)
@@ -41,6 +47,8 @@ lock_id(lt, idp)
 {
     u_int32_t id;
 
+    LOCK_PANIC_CHECK(lt);
+
     LOCK_LOCKREGION(lt);
     if (lt->region->id >= DB_LOCK_MAXID)
         lt->region->id = 0;
@@ -58,10 +66,37 @@ lock_vec(lt, locker, flags, list, nlist, elistp)
     int nlist;
     DB_LOCKREQ *list, **elistp;
 {
+    return (__lock_vec_internal(lt,
+        locker, NULL, flags, list, nlist, elistp));
+}
+
+int
+lock_tvec(lt, txn, flags, list, nlist, elistp)
+    DB_LOCKTAB *lt;
+    DB_TXN *txn;
+    u_int32_t flags;
+    int nlist;
+    DB_LOCKREQ *list, **elistp;
+{
+    return (__lock_vec_internal(lt,
+        txn->txnid, txn, flags, list, nlist, elistp));
+}
+
+static int
+__lock_vec_internal(lt, locker, txn, flags, list, nlist, elistp)
+    DB_LOCKTAB *lt;
+    u_int32_t locker;
+    DB_TXN *txn;
+    u_int32_t flags;
+    int nlist;
+    DB_LOCKREQ *list, **elistp;
+{
     struct __db_lock *lp;
-    DB_LOCKOBJ *sh_obj, *sh_locker;
+    DB_LOCKOBJ *sh_obj, *sh_locker, *sh_parent;
     int i, ret, run_dd;
 
+    LOCK_PANIC_CHECK(lt);
+
     /* Validate arguments. */
     if ((ret = __db_fchk(lt->dbenv,
         "lock_vec", flags, DB_LOCK_NOWAIT)) != 0)
@@ -78,13 +113,43 @@ lock_vec(lt, locker, flags, list, nlist, elistp)
     for (i = 0; i < nlist && ret == 0; i++) {
         switch (list[i].op) {
         case DB_LOCK_GET:
-            ret = __lock_get_internal(lt, locker, flags,
+            ret = __lock_get_internal(lt, locker, txn, flags,
                 list[i].obj, list[i].mode, &lp);
             if (ret == 0) {
                 list[i].lock = LOCK_TO_OFFSET(lt, lp);
                 lt->region->nrequests++;
             }
             break;
+        case DB_LOCK_INHERIT:
+            /* Find the locker. */
+            if ((ret = __lock_getobj(lt, locker,
+                NULL, DB_LOCK_LOCKER, &sh_locker)) != 0)
+                break;
+            if (txn == NULL || txn->parent == NULL) {
+                ret = EINVAL;
+                break;
+            }
+
+            if ((ret = __lock_getobj(lt, txn->parent->txnid,
+                NULL, DB_LOCK_LOCKER, &sh_parent)) != 0)
+                break;
+
+            /*
+             * Traverse all the locks held by this locker.  Remove
+             * the locks from the locker's list and put them on the
+             * parent's list.
+             */
+            for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
+                lp != NULL;
+                lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock)) {
+                SH_LIST_REMOVE(lp, locker_links, __db_lock);
+                SH_LIST_INSERT_HEAD(&sh_parent->heldby, lp,
+                    locker_links, __db_lock);
+                lp->holder = txn->parent->txnid;
+            }
+            __lock_freeobj(lt, sh_locker);
+            lt->region->nlockers--;
+            break;
         case DB_LOCK_PUT:
             lp = OFFSET_TO_LOCK(lt, list[i].lock);
             if (lp->holder != locker) {
@@ -93,8 +158,8 @@ lock_vec(lt, locker, flags, list, nlist, elistp)
             }
             list[i].mode = lp->mode;
 
-            /* XXX Need to copy the object. ??? */
             ret = __lock_put_internal(lt, lp, 0);
+            __lock_checklocker(lt, lp, 0);
             break;
         case DB_LOCK_PUT_ALL:
             /* Find the locker. */
@@ -204,18 +269,25 @@ lock_get(lt, locker, flags, obj, lock_mode, lock)
     struct __db_lock *lockp;
     int ret;
 
+    LOCK_PANIC_CHECK(lt);
+
     /* Validate arguments. */
-    if ((ret =
-        __db_fchk(lt->dbenv, "lock_get", flags, DB_LOCK_NOWAIT)) != 0)
+    if ((ret = __db_fchk(lt->dbenv,
+        "lock_get", flags, DB_LOCK_NOWAIT | DB_LOCK_UPGRADE)) != 0)
         return (ret);
 
     LOCK_LOCKREGION(lt);
 
-    ret = __lock_validate_region(lt);
-    if (ret == 0 && (ret = __lock_get_internal(lt,
-        locker, flags, obj, lock_mode, &lockp)) == 0) {
-        *lock = LOCK_TO_OFFSET(lt, lockp);
-        lt->region->nrequests++;
+    if ((ret = __lock_validate_region(lt)) == 0) {
+        if (LF_ISSET(DB_LOCK_UPGRADE))
+            lockp = OFFSET_TO_LOCK(lt, *lock);
+
+        if ((ret = __lock_get_internal(lt,
+            locker, NULL, flags, obj, lock_mode, &lockp)) == 0) {
+            if (!LF_ISSET(DB_LOCK_UPGRADE))
+                *lock = LOCK_TO_OFFSET(lt, lockp);
+            lt->region->nrequests++;
+        }
     }
 
     UNLOCK_LOCKREGION(lt);
@@ -223,6 +295,42 @@ lock_get(lt, locker, flags, obj, lock_mode, lock)
 }
 
 int
+lock_tget(lt, txn, flags, obj, lock_mode, lock)
+    DB_LOCKTAB *lt;
+    DB_TXN *txn;
+    u_int32_t flags;
+    const DBT *obj;
+    db_lockmode_t lock_mode;
+    DB_LOCK *lock;
+{
+    struct __db_lock *lockp;
+    int ret;
+
+    LOCK_PANIC_CHECK(lt);
+
+    /* Validate arguments. */
+    if ((ret = __db_fchk(lt->dbenv,
+        "lock_get", flags, DB_LOCK_NOWAIT | DB_LOCK_UPGRADE)) != 0)
+        return (ret);
+
+    LOCK_LOCKREGION(lt);
+
+    if ((ret = __lock_validate_region(lt)) == 0) {
+        if (LF_ISSET(DB_LOCK_UPGRADE))
+            lockp = OFFSET_TO_LOCK(lt, *lock);
+
+        if ((ret = __lock_get_internal(lt,
+            txn->txnid, txn, flags, obj, lock_mode, &lockp)) == 0) {
+            if (!LF_ISSET(DB_LOCK_UPGRADE))
+                *lock = LOCK_TO_OFFSET(lt, lockp);
+            lt->region->nrequests++;
+        }
+    }
+
+    UNLOCK_LOCKREGION(lt);
+    return (ret);
+}
+int
 lock_put(lt, lock)
     DB_LOCKTAB *lt;
     DB_LOCK lock;
@@ -230,6 +338,8 @@ lock_put(lt, lock)
     struct __db_lock *lockp;
     int ret, run_dd;
 
+    LOCK_PANIC_CHECK(lt);
+
     LOCK_LOCKREGION(lt);
 
     if ((ret = __lock_validate_region(lt)) != 0)
@@ -261,7 +371,6 @@ __lock_put_internal(lt, lockp, do_all)
     struct __db_lock *lockp;
     int do_all;
 {
-    struct __db_lock *lp_w, *lp_h, *next_waiter;
     DB_LOCKOBJ *sh_obj;
     int state_changed;
 
@@ -293,39 +402,7 @@ __lock_put_internal(lt, lockp, do_all)
     else
         SH_TAILQ_REMOVE(&sh_obj->holders, lockp, links, __db_lock);
 
-    /*
-     * We need to do lock promotion.  We also need to determine if
-     * we're going to need to run the deadlock detector again.  If
-     * we release locks, and there are waiters, but no one gets promoted,
-     * then we haven't fundamentally changed the lockmgr state, so
-     * we may still have a deadlock and we have to run again.  However,
-     * if there were no waiters, or we actually promoted someone, then
-     * we are OK and we don't have to run it immediately.
-     */
-    for (lp_w = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock),
-        state_changed = lp_w == NULL;
-        lp_w != NULL;
-        lp_w = next_waiter) {
-        next_waiter = SH_TAILQ_NEXT(lp_w, links, __db_lock);
-        for (lp_h = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock);
-            lp_h != NULL;
-            lp_h = SH_TAILQ_NEXT(lp_h, links, __db_lock)) {
-            if (CONFLICTS(lt, lp_h->mode, lp_w->mode) &&
-                lp_h->holder != lp_w->holder)
-                break;
-        }
-        if (lp_h != NULL)    /* Found a conflict. */
-            break;
-
-        /* No conflict, promote the waiting lock. */
-        SH_TAILQ_REMOVE(&sh_obj->waiters, lp_w, links, __db_lock);
-        lp_w->status = DB_LSTAT_PENDING;
-        SH_TAILQ_INSERT_TAIL(&sh_obj->holders, lp_w, links);
-
-        /* Wake up waiter. */
-        (void)__db_mutex_unlock(&lp_w->mutex, lt->reginfo.fd);
-        state_changed = 1;
-    }
+    state_changed = __lock_promote(lt, sh_obj);
 
     /* Check if object should be reclaimed. */
     if (SH_TAILQ_FIRST(&sh_obj->holders, __db_lock) == NULL) {
@@ -354,9 +431,10 @@ __lock_put_internal(lt, lockp, do_all)
 }
 
 static int
-__lock_get_internal(lt, locker, flags, obj, lock_mode, lockp)
+__lock_get_internal(lt, locker, txn, flags, obj, lock_mode, lockp)
     DB_LOCKTAB *lt;
     u_int32_t locker, flags;
+    DB_TXN *txn;
     const DBT *obj;
     db_lockmode_t lock_mode;
     struct __db_lock **lockp;
@@ -365,13 +443,13 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, lockp)
     DB_LOCKOBJ *sh_obj, *sh_locker;
     DB_LOCKREGION *lrp;
     size_t newl_off;
-    int ihold, ret;
+    int ihold, no_dd, ret;
+
+    no_dd = ret = 0;
 
-    ret = 0;
     /*
      * Check that lock mode is valid.
      */
-
     lrp = lt->region;
     if ((u_int32_t)lock_mode >= lrp->nmodes) {
         __db_err(lt->dbenv,
@@ -423,20 +501,28 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, lockp)
      * lock, then we guarantee deadlock.
      *
      * In case of conflict, we put the new lock on the end of the waiters
-     * list.
+     * list, unless we are upgrading in which case the locker goes on the
+     * front of the list.
      */
     ihold = 0;
     for (lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock);
         lp != NULL;
         lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
-        if (locker == lp->holder) {
+        if (locker == lp->holder ||
+            __lock_is_parent(lp->holder, txn)) {
             if (lp->mode == lock_mode &&
                 lp->status == DB_LSTAT_HELD) {
-                /* Lock is held, just inc the ref count. */
+                if (LF_ISSET(DB_LOCK_UPGRADE))
+                    goto upgrade;
+
+                /*
+                 * Lock is held, so we can increment the
+                 * reference count and return this lock.
+                 */
                 lp->refcount++;
+                *lockp = lp;
                 SH_TAILQ_INSERT_HEAD(&lrp->free_locks,
                     newl, links, __db_lock);
-                *lockp = lp;
                 return (0);
             } else
                 ihold = 1;
@@ -444,6 +530,21 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, lockp)
             break;
     }
 
+    /*
+     * If we are upgrading, then there are two scenarios.  Either
+     * we had no conflicts, so we can do the upgrade.  Or, there
+     * is a conflict and we should wait at the HEAD of the waiters
+     * list.
+     */
+    if (LF_ISSET(DB_LOCK_UPGRADE)) {
+        if (lp == NULL)
+            goto upgrade;
+
+        /* There was a conflict, wait. */
+        SH_TAILQ_INSERT_HEAD(&sh_obj->waiters, newl, links, __db_lock);
+        goto wait;
+    }
+
     if (lp == NULL && !ihold)
         for (lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock);
             lp != NULL;
@@ -464,31 +565,35 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, lockp)
     }
 
     /*
-     * This is really a blocker for the process, so initialize it
-     * set.  That way the current process will block when it tries
-     * to get it and the waking process will release it.
-     */
-    (void)__db_mutex_init(&newl->mutex,
-        MUTEX_LOCK_OFFSET(lt->region, &newl->mutex));
-    (void)__db_mutex_lock(&newl->mutex, lt->reginfo.fd);
-
-    /*
-     * Now, insert the lock onto its locker's list.
+     * Now, insert the lock onto its locker's list.  If the locker does
+     * not currently hold any locks, there's no reason to run a deadlock
+     * detector, save that information.
      */
     if ((ret =
         __lock_getobj(lt, locker, NULL, DB_LOCK_LOCKER, &sh_locker)) != 0)
         return (ret);
 
+    no_dd = SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL;
     lrp = lt->region;
     SH_LIST_INSERT_HEAD(&sh_locker->heldby, newl, locker_links, __db_lock);
 
     if (lp != NULL) {
+        /*
+         * This is really a blocker for the process, so initialize it
+         * set.  That way the current process will block when it tries
+         * to get it and the waking process will release it.
+         */
+wait:        (void)__db_mutex_init(&newl->mutex,
+            MUTEX_LOCK_OFFSET(lt->region, &newl->mutex));
+        (void)__db_mutex_lock(&newl->mutex, lt->reginfo.fd);
+
         newl->status = DB_LSTAT_WAITING;
         lrp->nconflicts++;
+
         /*
-         * We are about to wait; must release the region mutex.
-         * Then, when we wakeup, we need to reacquire the region
-         * mutex before continuing.
+         * We are about to wait; must release the region mutex.  Then,
+         * when we wakeup, we need to reacquire the region mutex before
+         * continuing.
          */
         if (lrp->detect == DB_LOCK_NORUN)
             lt->region->need_dd = 1;
@@ -498,13 +603,19 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, lockp)
         * We are about to wait; before waiting, see if the deadlock
         * detector should be run.
         */
-        if (lrp->detect != DB_LOCK_NORUN)
-            ret = lock_detect(lt, 0, lrp->detect);
+        if (lrp->detect != DB_LOCK_NORUN && !no_dd)
+            (void)lock_detect(lt, 0, lrp->detect);
 
         (void)__db_mutex_lock(&newl->mutex, lt->reginfo.fd);
 
         LOCK_LOCKREGION(lt);
         if (newl->status != DB_LSTAT_PENDING) {
+            /*
+             * If this lock errored due to a deadlock, then
+             * we have waiters that require promotion.
+             */
+            if (newl->status == DB_LSTAT_ABORTED)
+                (void)__lock_promote(lt, sh_obj);
             /* Return to free list. */
             __lock_checklocker(lt, newl, 0);
             SH_TAILQ_INSERT_HEAD(&lrp->free_locks, newl, links,
@@ -522,12 +633,31 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, lockp)
             }
             newl->status = DB_LSTAT_FREE;
             newl = NULL;
+        } else if (LF_ISSET(DB_LOCK_UPGRADE)) {
+            /*
+             * The lock that was just granted got put on the
+             * holders list.  Since we're upgrading some other
+             * lock, we've got to remove it here.
+             */
+            SH_TAILQ_REMOVE(&sh_obj->holders,
+                newl, links, __db_lock);
+            goto upgrade;
         } else
             newl->status = DB_LSTAT_HELD;
     }
 
     *lockp = newl;
     return (ret);
+
+upgrade:
+    /*
+     * This was an upgrade, so return the new lock to the free list and
+     * upgrade the mode.
+     */
+    (*lockp)->mode = lock_mode;
+    newl->status = DB_LSTAT_FREE;
+    SH_TAILQ_INSERT_HEAD(&lrp->free_locks, newl, links, __db_lock);
+    return (0);
 }
 
 /*
@@ -788,3 +918,117 @@ __lock_freeobj(lt, obj)
     __db_shalloc_free(lt->mem, SH_DBT_PTR(&obj->lockobj));
     SH_TAILQ_INSERT_HEAD(&lt->region->free_objs, obj, links, __db_lockobj);
 }
+
+/*
+ * __lock_downgrade --
+ *    Used by the concurrent access product to downgrade write locks
+ *    back to iwrite locks.
+ *
+ * PUBLIC: int __lock_downgrade __P((DB_LOCKTAB *,
+ * PUBLIC:     DB_LOCK, db_lockmode_t, u_int32_t));
+ */
+int
+__lock_downgrade(lt, lock, new_mode, flags)
+    DB_LOCKTAB *lt;
+    DB_LOCK lock;
+    db_lockmode_t new_mode;
+    u_int32_t flags;
+{
+    struct __db_lock *lockp;
+    DB_LOCKOBJ *obj;
+    int ret;
+
+    COMPQUIET(flags, 0);
+    LOCK_PANIC_CHECK(lt);
+    LOCK_LOCKREGION(lt);
+
+    if ((ret = __lock_validate_region(lt)) == 0) {
+        lockp = OFFSET_TO_LOCK(lt, lock);
+        lockp->mode = new_mode;
+
+        /* Get the object associated with this lock. */
+        obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
+        (void)__lock_promote(lt, obj);
+        ++lt->region->nreleases;
+    }
+
+    UNLOCK_LOCKREGION(lt);
+
+    return (ret);
+}
+
+/*
+ * __lock_promote --
+ *
+ * Look through the waiters and holders lists and decide which (if any)
+ * locks can be promoted.  Promote any that are eligible.
+ */
+static int
+__lock_promote(lt, obj)
+    DB_LOCKTAB *lt;
+    DB_LOCKOBJ *obj;
+{
+    struct __db_lock *lp_w, *lp_h, *next_waiter;
+    int state_changed, waiter_is_txn;
+
+    /*
+     * We need to do lock promotion.  We also need to determine if
+     * we're going to need to run the deadlock detector again.  If
+     * we release locks, and there are waiters, but no one gets promoted,
+     * then we haven't fundamentally changed the lockmgr state, so
+     * we may still have a deadlock and we have to run again.  However,
+     * if there were no waiters, or we actually promoted someone, then
+     * we are OK and we don't have to run it immediately.
+     *
+     * During promotion, we look for state changes so we can return
+     * this information to the caller.
+     */
+    for (lp_w = SH_TAILQ_FIRST(&obj->waiters, __db_lock),
+        state_changed = lp_w == NULL;
+        lp_w != NULL;
+        lp_w = next_waiter) {
+        waiter_is_txn = TXN_IS_HOLDING(lp_w);
+        next_waiter = SH_TAILQ_NEXT(lp_w, links, __db_lock);
+        for (lp_h = SH_TAILQ_FIRST(&obj->holders, __db_lock);
+            lp_h != NULL;
+            lp_h = SH_TAILQ_NEXT(lp_h, links, __db_lock)) {
+            if (CONFLICTS(lt, lp_h->mode, lp_w->mode) &&
+                lp_h->holder != lp_w->holder &&
+                !(waiter_is_txn &&
+                TXN_IS_HOLDING(lp_h) &&
+                __txn_is_ancestor(lt->dbenv->tx_info,
+                    lp_h->txnoff, lp_w->txnoff)))
+                break;
+        }
+        if (lp_h != NULL)    /* Found a conflict. */
+            break;
+
+        /* No conflict, promote the waiting lock. */
+        SH_TAILQ_REMOVE(&obj->waiters, lp_w, links, __db_lock);
+        lp_w->status = DB_LSTAT_PENDING;
+        SH_TAILQ_INSERT_TAIL(&obj->holders, lp_w, links);
+
+        /* Wake up waiter. */
+        (void)__db_mutex_unlock(&lp_w->mutex, lt->reginfo.fd);
+        state_changed = 1;
+    }
+
+    return (state_changed);
+}
+
+static int
+__lock_is_parent(locker, txn)
+    u_int32_t locker;
+    DB_TXN *txn;
+{
+    DB_TXN *t;
+
+    if (txn == NULL)
+        return (0);
+
+    for (t = txn->parent; t != NULL; t = t->parent)
+        if (t->txnid == locker)
+            return (1);
+
+    return (0);
+}
diff --git a/db2/lock/lock_conflict.c b/db2/lock/lock_conflict.c
index 870aa0dc17..4be858af7a 100644
--- a/db2/lock/lock_conflict.c
+++ b/db2/lock/lock_conflict.c
@@ -8,7 +8,7 @@
 #include "config.h"
 
 #ifndef lint
-static const char sccsid[] = "@(#)lock_conflict.c    10.3 (Sleepycat) 4/10/98";
+static const char sccsid[] = "@(#)lock_conflict.c    10.4 (Sleepycat) 11/20/98";
 #endif /* not lint */
 
 #ifndef NO_SYSTEM_INCLUDES
@@ -29,11 +29,11 @@
 };
 
 const u_int8_t db_riw_conflicts[] = {
-    /*        N   S   X  IS  IX SIX */
+    /*        N   S   X  IX  IS SIX */
     /*   N */ 0,  0,  0,  0,  0,  0,
-    /*   S */ 0,  0,  1,  0,  1,  1,
+    /*   S */ 0,  0,  1,  1,  0,  1,
     /*   X */ 1,  1,  1,  1,  1,  1,
-    /*  IS */ 0,  0,  1,  0,  0,  0,
     /*  IX */ 0,  1,  1,  0,  0,  0,
+    /*  IS */ 0,  0,  1,  0,  0,  0,
     /* SIX */ 0,  1,  1,  0,  0,  0
 };
diff --git a/db2/lock/lock_deadlock.c b/db2/lock/lock_deadlock.c
index 4de492944e..8b2f91bc9e 100644
--- a/db2/lock/lock_deadlock.c
+++ b/db2/lock/lock_deadlock.c
@@ -8,7 +8,7 @@
 #include "config.h"
 
 #ifndef lint
-static const char sccsid[] = "@(#)lock_deadlock.c    10.32 (Sleepycat) 4/26/98";
+static const char sccsid[] = "@(#)lock_deadlock.c    10.37 (Sleepycat) 10/4/98";
 #endif /* not lint */
 
 #ifndef NO_SYSTEM_INCLUDES
@@ -69,6 +69,8 @@ lock_detect(lt, flags, atype)
     u_int32_t *bitmap, *deadlock, i, killid, nentries, nlockers;
     int do_pass, ret;
 
+    LOCK_PANIC_CHECK(lt);
+
     /* Validate arguments. */
     if ((ret =
         __db_fchk(lt->dbenv, "lock_detect", flags, DB_LOCK_CONFLICT)) != 0)
@@ -176,8 +178,8 @@ lock_detect(lt, flags, atype)
             "warning: unable to abort locker %lx",
             (u_long)idmap[killid].id);
     }
-    __db_free(bitmap);
-    __db_free(idmap);
+    __os_free(bitmap, 0);
+    __os_free(idmap, 0);
     return (ret);
 }
 
@@ -198,7 +200,7 @@ __dd_build(dbenv, bmp, nlockers, idmap)
     u_int8_t *pptr;
     locker_info *id_array;
     u_int32_t *bitmap, count, *entryp, i, id, nentries, *tmpmap;
-    int is_first;
+    int is_first, ret;
 
     lt = dbenv->lk_info;
 
@@ -230,25 +232,20 @@ retry:    count = lt->region->nlockers;
      * We can probably save the malloc's between iterations just
      * reallocing if necessary because count grew by too much.
      */
-    if ((bitmap = (u_int32_t *)__db_calloc((size_t)count,
-        sizeof(u_int32_t) * nentries)) == NULL) {
-        __db_err(dbenv, "%s", strerror(ENOMEM));
-        return (ENOMEM);
-    }
+    if ((ret = __os_calloc((size_t)count,
+        sizeof(u_int32_t) * nentries, &bitmap)) != 0)
+        return (ret);
 
-    if ((tmpmap =
-        (u_int32_t *)__db_calloc(sizeof(u_int32_t), nentries)) == NULL) {
-        __db_err(dbenv, "%s", strerror(ENOMEM));
-        __db_free(bitmap);
-        return (ENOMEM);
+    if ((ret = __os_calloc(sizeof(u_int32_t), nentries, &tmpmap)) != 0) {
+        __os_free(bitmap, sizeof(u_int32_t) * nentries);
+        return (ret);
     }
 
-    if ((id_array = (locker_info *)__db_calloc((size_t)count,
-        sizeof(locker_info))) == NULL) {
-        __db_err(dbenv, "%s", strerror(ENOMEM));
-        __db_free(bitmap);
-        __db_free(tmpmap);
-        return (ENOMEM);
+    if ((ret =
+        __os_calloc((size_t)count, sizeof(locker_info), &id_array)) != 0) {
+        __os_free(bitmap, count * sizeof(u_int32_t) * nentries);
+        __os_free(tmpmap, sizeof(u_int32_t) * nentries);
+        return (ret);
     }
 
     /*
@@ -256,9 +253,9 @@ retry:    count = lt->region->nlockers;
     */
     LOCK_LOCKREGION(lt);
     if (lt->region->nlockers > count) {
-        __db_free(bitmap);
-        __db_free(tmpmap);
-        __db_free(id_array);
+        __os_free(bitmap, count * sizeof(u_int32_t) * nentries);
+        __os_free(tmpmap, sizeof(u_int32_t) * nentries);
+        __os_free(id_array, count * sizeof(locker_info));
         goto retry;
     }
 
@@ -383,7 +380,7 @@ retry:    count = lt->region->nlockers;
     *nlockers = id;
     *idmap = id_array;
     *bmp = bitmap;
-    __db_free(tmpmap);
+    __os_free(tmpmap, sizeof(u_int32_t) * nentries);
     return (0);
 }
 
@@ -434,8 +431,21 @@ __dd_abort(dbenv, info)
         goto out;
 
     lockp = SH_LIST_FIRST(&lockerp->heldby, __db_lock);
-    if (LOCK_TO_OFFSET(lt, lockp) != info->last_lock ||
-        lockp == NULL || lockp->status != DB_LSTAT_WAITING)
+
+    /*
+     * It's possible that this locker was already aborted.
+     * If that's the case, make sure that we remove its
+     * locker from the hash table.
+     */
+    if (lockp == NULL) {
+        HASHREMOVE_EL(lt->hashtab, __db_lockobj,
+            links, lockerp, lt->region->table_size, __lock_lhash);
+        SH_TAILQ_INSERT_HEAD(&lt->region->free_objs,
+            lockerp, links, __db_lockobj);
+        lt->region->nlockers--;
+        goto out;
+    } else if (LOCK_TO_OFFSET(lt, lockp) != info->last_lock ||
+        lockp->status != DB_LSTAT_WAITING)
         goto out;
 
     /* Abort lock, take it off list, and wake up this lock. */
@@ -460,17 +470,17 @@ __dd_debug(dbenv, idmap, bitmap, nlockers)
     u_int32_t *bitmap, nlockers;
 {
     u_int32_t i, j, *mymap, nentries;
+    int ret;
     char *msgbuf;
 
     __db_err(dbenv, "Waitsfor array");
     __db_err(dbenv, "waiter\twaiting on");
-    /*
-     * Allocate space to print 10 bytes per item waited on.
-     */
-    if ((msgbuf = (char *)__db_malloc((nlockers + 1) * 10 + 64)) == NULL) {
-        __db_err(dbenv, "%s", strerror(ENOMEM));
+
+    /* Allocate space to print 10 bytes per item waited on. */
+#undef MSGBUF_LEN
+#define MSGBUF_LEN ((nlockers + 1) * 10 + 64)
+    if ((ret = __os_malloc(MSGBUF_LEN, NULL, &msgbuf)) != 0)
         return;
-    }
 
     nentries = ALIGN(nlockers, 32) / 32;
     for (mymap = bitmap, i = 0; i < nlockers; i++, mymap += nentries) {
@@ -487,6 +497,6 @@ __dd_debug(dbenv, idmap, bitmap, nlockers)
         __db_err(dbenv, msgbuf);
     }
 
-    __db_free(msgbuf);
+    __os_free(msgbuf, MSGBUF_LEN);
 }
 #endif
diff --git a/db2/lock/lock_region.c b/db2/lock/lock_region.c
index b597560744..613a6cefb2 100644
--- a/db2/lock/lock_region.c
+++ b/db2/lock/lock_region.c
@@ -8,7 +8,7 @@
 #include "config.h"
 
 #ifndef lint
-static const char sccsid[] = "@(#)lock_region.c    10.15 (Sleepycat) 6/2/98";
+static const char sccsid[] = "@(#)lock_region.c    10.21 (Sleepycat) 10/19/98";
 #endif /* not lint */
 
 #ifndef NO_SYSTEM_INCLUDES
@@ -29,7 +29,8 @@
 static u_int32_t __lock_count_locks __P((DB_LOCKREGION *));
 static u_int32_t __lock_count_objs __P((DB_LOCKREGION *));
 static void      __lock_dump_locker __P((DB_LOCKTAB *, DB_LOCKOBJ *, FILE *));
 static void      __lock_dump_object __P((DB_LOCKTAB *, DB_LOCKOBJ *, FILE *));
-static const char *__lock_dump_status __P((db_status_t));
+static const char *
+                 __lock_dump_status __P((db_status_t));
 static void      __lock_reset_region __P((DB_LOCKTAB *));
 static int       __lock_tabinit __P((DB_ENV *, DB_LOCKREGION *));
@@ -55,10 +56,8 @@ lock_open(path, flags, mode, dbenv, ltp)
         return (ret);
 
     /* Create the lock table structure. */
-    if ((lt = (DB_LOCKTAB *)__db_calloc(1, sizeof(DB_LOCKTAB))) == NULL) {
-        __db_err(dbenv, "%s", strerror(ENOMEM));
-        return (ENOMEM);
-    }
+    if ((ret = __os_calloc(1, sizeof(DB_LOCKTAB), &lt)) != 0)
+        return (ret);
     lt->dbenv = dbenv;
 
     /* Grab the values that we need to compute the region size. */
@@ -82,7 +81,7 @@ lock_open(path, flags, mode, dbenv, ltp)
     if (path == NULL)
         lt->reginfo.path = NULL;
     else
-        if ((lt->reginfo.path = (char *)__db_strdup(path)) == NULL)
+        if ((ret = __os_strdup(path, &lt->reginfo.path)) != 0)
             goto err;
     lt->reginfo.file = DB_DEFAULT_LOCK_FILE;
     lt->reginfo.mode = mode;
@@ -147,12 +146,27 @@ err:    if (lt->reginfo.addr != NULL) {
     }
 
     if (lt->reginfo.path != NULL)
-        FREES(lt->reginfo.path);
-    FREE(lt, sizeof(*lt));
+        __os_freestr(lt->reginfo.path);
+    __os_free(lt, sizeof(*lt));
     return (ret);
 }
 
 /*
+ * __lock_panic --
+ *    Panic a lock region.
+ *
+ * PUBLIC: void __lock_panic __P((DB_ENV *));
+ */
+void
+__lock_panic(dbenv)
+    DB_ENV *dbenv;
+{
+    if (dbenv->lk_info != NULL)
+        dbenv->lk_info->region->hdr.panic = 1;
+}
+
+
+/*
  * __lock_tabinit --
  *    Initialize the lock region.
  */
@@ -254,12 +268,14 @@ lock_close(lt)
 {
     int ret;
 
+    LOCK_PANIC_CHECK(lt);
+
     if ((ret = __db_rdetach(&lt->reginfo)) != 0)
         return (ret);
 
     if (lt->reginfo.path != NULL)
-        FREES(lt->reginfo.path);
-    FREE(lt, sizeof(*lt));
+        __os_freestr(lt->reginfo.path);
+    __os_free(lt, sizeof(*lt));
 
     return (0);
 }
@@ -276,12 +292,12 @@ lock_unlink(path, force, dbenv)
     memset(&reginfo, 0, sizeof(reginfo));
     reginfo.dbenv = dbenv;
     reginfo.appname = DB_APP_NONE;
-    if (path != NULL && (reginfo.path = (char *)__db_strdup(path)) == NULL)
-        return (ENOMEM);
+    if (path != NULL && (ret = __os_strdup(path, &reginfo.path)) != 0)
+        return (ret);
     reginfo.file = DB_DEFAULT_LOCK_FILE;
     ret = __db_runlink(&reginfo, force);
     if (reginfo.path != NULL)
-        FREES(reginfo.path);
+        __os_freestr(reginfo.path);
     return (ret);
 }
 
@@ -463,13 +479,14 @@ lock_stat(lt, gspp, db_malloc)
     void *(*db_malloc) __P((size_t));
 {
     DB_LOCKREGION *rp;
+    int ret;
 
     *gspp = NULL;
 
-    if ((*gspp = db_malloc == NULL ?
-        (DB_LOCK_STAT *)__db_malloc(sizeof(**gspp)) :
-        (DB_LOCK_STAT *)db_malloc(sizeof(**gspp))) == NULL)
-        return (ENOMEM);
+    LOCK_PANIC_CHECK(lt);
+
+    if ((ret = __os_malloc(sizeof(**gspp), db_malloc, gspp)) != 0)
+        return (ret);
 
     /* Copy out the global statistics. */
     LOCK_LOCKREGION(lt);
@@ -632,15 +649,15 @@ __lock_dump_region(lt, area, fp)
         for (lp = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock);
             lp != NULL;
             lp = SH_TAILQ_NEXT(lp, links, __db_lock))
-            fprintf(fp, "0x%x: %lu\t%lu\t%s\t0x%x\n", (u_int)lp,
+            fprintf(fp, "0x%lx: %lu\t%lu\t%s\t0x%lx\n", (u_long)lp,
                 (u_long)lp->holder, (u_long)lp->mode,
-                __lock_dump_status(lp->status), (u_int)lp->obj);
+                __lock_dump_status(lp->status), (u_long)lp->obj);
 
         fprintf(fp, "%s\nObject free list\n", DB_LINE);
         for (op = SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj);
             op != NULL;
             op = SH_TAILQ_NEXT(op, links, __db_lockobj))
-            fprintf(fp, "0x%x\n", (u_int)op);
+            fprintf(fp, "0x%lx\n", (u_long)op);
     }
 
     if (LF_ISSET(LOCK_DUMP_MEM))
diff --git a/db2/lock/lock_util.c b/db2/lock/lock_util.c
index 7274a50422..29da75b8a8 100644
--- a/db2/lock/lock_util.c
+++ b/db2/lock/lock_util.c
@@ -8,7 +8,7 @@
 #include "config.h"
 
 #ifndef lint
-static const char sccsid[] = "@(#)lock_util.c    10.9 (Sleepycat) 4/26/98";
+static const char sccsid[] = "@(#)lock_util.c    10.10 (Sleepycat) 9/20/98";
 #endif /* not lint */
 
 #ifndef NO_SYSTEM_INCLUDES
@@ -75,7 +75,7 @@ __lock_locker_cmp(locker, lock_obj)
  * fast path the case where we think we are doing a hash on a DB page/fileid
  * pair.  If the size is right, then we do the fast hash.
  *
- * We know that DB uses struct __db_ilocks for its lock objects.  The first
+ * We know that DB uses DB_LOCK_ILOCK types for its lock objects.  The first
 * four bytes are the 4-byte page number and the next DB_FILE_ID_LEN bytes
 * are a unique file id, where the first 4 bytes on UNIX systems are the file
 * inode number, and the first 4 bytes on Windows systems are the FileIndexLow
@@ -107,7 +107,7 @@ u_int32_t
 __lock_ohash(dbt)
     const DBT *dbt;
 {
-    if (dbt->size == sizeof(struct __db_ilock))
+    if (dbt->size == sizeof(DB_LOCK_ILOCK))
         FAST_HASH(dbt->data);
 
     return (__ham_func5(dbt->data, dbt->size));
@@ -131,7 +131,7 @@ __lock_lhash(lock_obj)
         return (tmp);
     }
 
-    if (lock_obj->lockobj.size == sizeof(struct __db_ilock))
+    if (lock_obj->lockobj.size == sizeof(DB_LOCK_ILOCK))
         FAST_HASH(obj_data);
 
     return (__ham_func5(obj_data, lock_obj->lockobj.size));
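The block comment moved into `__lock_promote` above is the heart of the change: after a release (or a downgrade, or an aborted waiter), walk the waiters in FIFO order and grant every waiter that no longer conflicts with any holder, stopping at the first one that still conflicts, and report whether anything changed so the caller knows whether the deadlock detector must run again. The following is a minimal, self-contained C model of that walk; it is a sketch only, using plain arrays in place of the shared-memory SH_TAILQ lists, omitting the transaction-ancestor exception, and `promote_waiters` is an illustrative name, not a function in this code.

#include <stdio.h>

/* Illustrative lock modes, mirroring the N/S/X ordering used above. */
enum { M_N, M_S, M_X, NMODES };

/* conflicts[held * NMODES + wanted] != 0 means "wanted" must wait. */
static const unsigned char conflicts[NMODES * NMODES] = {
    /*       N  S  X */
    /* N */  0, 0, 0,
    /* S */  0, 0, 1,
    /* X */  1, 1, 1,
};

struct lock {
    int holder;  /* locker id */
    int mode;    /* M_* */
};

/*
 * Promote waiters in FIFO order while the head waiter no longer
 * conflicts with any holder owned by a different locker.  Returns
 * nonzero if state changed (a waiter was granted, or there were no
 * waiters at all), so the caller can decide whether the deadlock
 * detector still needs to run.
 */
static int
promote_waiters(struct lock *holders, int *nholders,
    struct lock *waiters, int *nwaiters)
{
    int changed = (*nwaiters == 0);

    while (*nwaiters > 0) {
        struct lock *w = &waiters[0];
        int i, conflict = 0;

        for (i = 0; i < *nholders; i++)
            if (conflicts[holders[i].mode * NMODES + w->mode] &&
                holders[i].holder != w->holder) {
                conflict = 1;
                break;
            }
        if (conflict)
            break;      /* Head waiter is still blocked; stop. */

        /* No conflict: move the waiter onto the holders list. */
        holders[(*nholders)++] = *w;
        for (i = 1; i < *nwaiters; i++)
            waiters[i - 1] = waiters[i];
        (*nwaiters)--;
        changed = 1;
    }
    return (changed);
}

int
main(void)
{
    struct lock holders[8] = { { 1, M_S } };
    struct lock waiters[8] = { { 2, M_S }, { 3, M_X } };
    int nh = 1, nw = 2;

    /* Locker 2's S request is granted; locker 3's X still conflicts. */
    printf("state changed: %d\n", promote_waiters(holders, &nh, waiters, &nw));
    printf("holders: %d, waiters: %d\n", nh, nw);
    return (0);
}

Note the early break on the first still-blocked waiter: granting later compatible requests out of FIFO order would let them starve the blocked one.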
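`__lock_is_parent` and the new `DB_LOCK_INHERIT` operation together give nested transactions their locking semantics: a child transaction never conflicts with locks held by its ancestors, and when the child commits, its locks are handed up to the parent rather than released. A rough sketch under simplified assumptions follows: a toy `struct txn` stands in for `DB_TXN`, a flat array stands in for the locker's `heldby` list, and `lock_is_parent`/`inherit_locks` are illustrative names.

#include <stdio.h>

/* Simplified stand-in for DB_TXN: an id and a parent link. */
struct txn {
    unsigned int txnid;
    struct txn *parent;
};

/* Mirrors __lock_is_parent: is `locker` an ancestor of txn? */
static int
lock_is_parent(unsigned int locker, struct txn *txn)
{
    struct txn *t;

    if (txn == NULL)
        return (0);
    for (t = txn->parent; t != NULL; t = t->parent)
        if (t->txnid == locker)
            return (1);
    return (0);
}

/* Toy lock record; DB_LOCK_INHERIT re-homes each lock to the parent. */
struct lock {
    unsigned int holder;
};

/* Caller must ensure child->parent != NULL (the EINVAL case above). */
static void
inherit_locks(struct lock *locks, int nlocks, struct txn *child)
{
    int i;

    for (i = 0; i < nlocks; i++)
        if (locks[i].holder == child->txnid)
            locks[i].holder = child->parent->txnid;
}

int
main(void)
{
    struct txn root = { 1, NULL }, child = { 2, &root };
    struct lock locks[2] = { { 2 }, { 3 } };

    /* An ancestor's holdings never block its own child. */
    printf("is_parent: %d\n", lock_is_parent(1, &child));

    /* On child commit, lock 0 is handed to the parent (holder 1). */
    inherit_locks(locks, 2, &child);
    printf("lock 0 holder: %u\n", locks[0].holder);
    return (0);
}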
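The `db_riw_conflicts` fix above reorders the IX and IS rows and columns and makes a held S lock conflict with an IX request instead of an IS request. Tables like this are consulted by flattening (held mode, requested mode) into an index; the sketch below assumes the conventional row-is-held, column-is-requested layout (the actual `CONFLICTS` macro is not part of this diff) and simply reads a few entries from the patched table.

#include <stdio.h>

/* Mode order as it reads after this commit: N, S, X, IX, IS, SIX. */
enum { L_N, L_S, L_X, L_IX, L_IS, L_SIX, RIW_NMODES };

/* db_riw_conflicts as patched above. */
static const unsigned char riw_conflicts[RIW_NMODES * RIW_NMODES] = {
    /*         N  S  X IX IS SIX */
    /*   N */  0, 0, 0, 0, 0, 0,
    /*   S */  0, 0, 1, 1, 0, 1,
    /*   X */  1, 1, 1, 1, 1, 1,
    /*  IX */  0, 1, 1, 0, 0, 0,
    /*  IS */  0, 0, 1, 0, 0, 0,
    /* SIX */  0, 1, 1, 0, 0, 0,
};

/* Assumed layout: held-mode row, requested-mode column. */
#define CONFLICTS(held, wanted) (riw_conflicts[(held) * RIW_NMODES + (wanted)])

int
main(void)
{
    /* The corrected semantics: S blocks intent-to-write, not intent-to-read. */
    printf("S held, IX wanted: %d\n", CONFLICTS(L_S, L_IX));
    printf("S held, IS wanted: %d\n", CONFLICTS(L_S, L_IS));
    printf("IS held, S wanted: %d\n", CONFLICTS(L_IS, L_S));
    return (0);
}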