Diffstat (limited to 'db2/lock')

 -rw-r--r--  db2/lock/lock.c           1034
 -rw-r--r--  db2/lock/lock_conflict.c    39
 -rw-r--r--  db2/lock/lock_deadlock.c   502
 -rw-r--r--  db2/lock/lock_region.c     743
 -rw-r--r--  db2/lock/lock_util.c       152

 5 files changed, 0 insertions, 2470 deletions
diff --git a/db2/lock/lock.c b/db2/lock/lock.c deleted file mode 100644 index 4cf1d9ecca..0000000000 --- a/db2/lock/lock.c +++ /dev/null @@ -1,1034 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996, 1997, 1998 - * Sleepycat Software. All rights reserved. - */ - -#include "config.h" - -#ifndef lint -static const char sccsid[] = "@(#)lock.c 10.61 (Sleepycat) 1/3/99"; -#endif /* not lint */ - -#ifndef NO_SYSTEM_INCLUDES -#include <sys/types.h> - -#include <errno.h> -#include <string.h> -#endif - -#include "db_int.h" -#include "shqueue.h" -#include "db_page.h" -#include "db_shash.h" -#include "lock.h" -#include "db_am.h" -#include "txn_auto.h" -#include "txn_ext.h" -#include "common_ext.h" - -static void __lock_checklocker __P((DB_LOCKTAB *, struct __db_lock *, int)); -static void __lock_freeobj __P((DB_LOCKTAB *, DB_LOCKOBJ *)); -static int __lock_get_internal __P((DB_LOCKTAB *, u_int32_t, DB_TXN *, - u_int32_t, const DBT *, db_lockmode_t, struct __db_lock **)); -static int __lock_is_parent __P((u_int32_t, DB_TXN *)); -static int __lock_promote __P((DB_LOCKTAB *, DB_LOCKOBJ *)); -static int __lock_put_internal __P((DB_LOCKTAB *, struct __db_lock *, int)); -static void __lock_remove_waiter - __P((DB_LOCKTAB *, DB_LOCKOBJ *, struct __db_lock *, db_status_t)); -static int __lock_vec_internal __P((DB_LOCKTAB *, u_int32_t, DB_TXN *, - u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **elistp)); - -int -lock_id(lt, idp) - DB_LOCKTAB *lt; - u_int32_t *idp; -{ - u_int32_t id; - - LOCK_PANIC_CHECK(lt); - - LOCK_LOCKREGION(lt); - if (lt->region->id >= DB_LOCK_MAXID) - lt->region->id = 0; - id = ++lt->region->id; - UNLOCK_LOCKREGION(lt); - - *idp = id; - return (0); -} - -int -lock_vec(lt, locker, flags, list, nlist, elistp) - DB_LOCKTAB *lt; - u_int32_t locker, flags; - int nlist; - DB_LOCKREQ *list, **elistp; -{ - return (__lock_vec_internal(lt, - locker, NULL, flags, list, nlist, elistp)); -} - -int -lock_tvec(lt, txn, flags, list, nlist, elistp) - DB_LOCKTAB *lt; - DB_TXN *txn; - u_int32_t flags; - int nlist; - DB_LOCKREQ *list, **elistp; -{ - return (__lock_vec_internal(lt, - txn->txnid, txn, flags, list, nlist, elistp)); -} - -static int -__lock_vec_internal(lt, locker, txn, flags, list, nlist, elistp) - DB_LOCKTAB *lt; - u_int32_t locker; - DB_TXN *txn; - u_int32_t flags; - int nlist; - DB_LOCKREQ *list, **elistp; -{ - struct __db_lock *lp; - DB_LOCKOBJ *sh_obj, *sh_locker, *sh_parent; - int i, ret, run_dd; - - LOCK_PANIC_CHECK(lt); - - /* Validate arguments. */ - if ((ret = - __db_fchk(lt->dbenv, "lock_vec", flags, DB_LOCK_NOWAIT)) != 0) - return (ret); - - LOCK_LOCKREGION(lt); - - if ((ret = __lock_validate_region(lt)) != 0) { - UNLOCK_LOCKREGION(lt); - return (ret); - } - - ret = 0; - for (i = 0; i < nlist && ret == 0; i++) { - switch (list[i].op) { - case DB_LOCK_GET: - ret = __lock_get_internal(lt, locker, txn, flags, - list[i].obj, list[i].mode, &lp); - if (ret == 0) { - list[i].lock = LOCK_TO_OFFSET(lt, lp); - lt->region->nrequests++; - } - break; - case DB_LOCK_INHERIT: - /* Find the locker. */ - if ((ret = __lock_getobj(lt, locker, - NULL, DB_LOCK_LOCKER, &sh_locker)) != 0) - break; - if (txn == NULL || txn->parent == NULL) { - ret = EINVAL; - break; - } - - if ((ret = __lock_getobj(lt, txn->parent->txnid, - NULL, DB_LOCK_LOCKER, &sh_parent)) != 0) - break; - - /* - * Traverse all the locks held by this locker. Remove - * the locks from the locker's list and put them on the - * parent's list. 
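[Annotation] lock_vec() and lock_tvec() above execute an array of DB_LOCKREQ operations under one acquisition of the region mutex. A minimal caller-side sketch, using only the request fields this code reads (op, mode, obj, lock); the object bytes are hypothetical and error handling is abbreviated:

#include <string.h>
#include <db.h>

int
lock_batch_example(DB_LOCKTAB *lt)
{
	DB_LOCKREQ req, *errp;
	DBT obj;
	u_int32_t locker;
	int ret;

	if ((ret = lock_id(lt, &locker)) != 0)
		return (ret);

	memset(&obj, 0, sizeof(obj));
	obj.data = "page-17";			/* hypothetical lock object */
	obj.size = sizeof("page-17") - 1;

	req.op = DB_LOCK_GET;			/* on success, fills in req.lock */
	req.obj = &obj;
	req.mode = DB_LOCK_READ;
	if ((ret = lock_vec(lt, locker, 0, &req, 1, &errp)) != 0)
		return (ret);

	/* ... work under the lock; then release everything this locker holds. */
	req.op = DB_LOCK_PUT_ALL;
	return (lock_vec(lt, locker, 0, &req, 1, &errp));
}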
- */ - for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock); - lp != NULL; - lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock)) { - SH_LIST_REMOVE(lp, locker_links, __db_lock); - SH_LIST_INSERT_HEAD(&sh_parent->heldby, lp, - locker_links, __db_lock); - lp->holder = txn->parent->txnid; - } - __lock_freeobj(lt, sh_locker); - lt->region->nlockers--; - break; - case DB_LOCK_PUT: - lp = OFFSET_TO_LOCK(lt, list[i].lock); - if (lp->holder != locker) { - ret = DB_LOCK_NOTHELD; - break; - } - list[i].mode = lp->mode; - - ret = __lock_put_internal(lt, lp, 0); - __lock_checklocker(lt, lp, 0); - break; - case DB_LOCK_PUT_ALL: - /* Find the locker. */ - if ((ret = __lock_getobj(lt, locker, - NULL, DB_LOCK_LOCKER, &sh_locker)) != 0) - break; - - for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock); - lp != NULL; - lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock)) { - if ((ret = __lock_put_internal(lt, lp, 1)) != 0) - break; - } - __lock_freeobj(lt, sh_locker); - lt->region->nlockers--; - break; - case DB_LOCK_PUT_OBJ: - - /* Look up the object in the hash table. */ - HASHLOOKUP(lt->hashtab, __db_lockobj, links, - list[i].obj, sh_obj, lt->region->table_size, - __lock_ohash, __lock_cmp); - if (sh_obj == NULL) { - ret = EINVAL; - break; - } - /* - * Release waiters first, because they won't cause - * anyone else to be awakened. If we release the - * lockers first, all the waiters get awakened - * needlessly. - */ - for (lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock); - lp != NULL; - lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock)) { - lt->region->nreleases += lp->refcount; - __lock_remove_waiter(lt, sh_obj, lp, - DB_LSTAT_NOGRANT); - __lock_checklocker(lt, lp, 1); - } - - for (lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock); - lp != NULL; - lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock)) { - - lt->region->nreleases += lp->refcount; - SH_LIST_REMOVE(lp, locker_links, __db_lock); - SH_TAILQ_REMOVE(&sh_obj->holders, lp, links, - __db_lock); - lp->status = DB_LSTAT_FREE; - SH_TAILQ_INSERT_HEAD(<->region->free_locks, - lp, links, __db_lock); - } - - /* Now free the object. */ - __lock_freeobj(lt, sh_obj); - break; -#ifdef DEBUG - case DB_LOCK_DUMP: - /* Find the locker. */ - if ((ret = __lock_getobj(lt, locker, - NULL, DB_LOCK_LOCKER, &sh_locker)) != 0) - break; - - for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock); - lp != NULL; - lp = SH_LIST_NEXT(lp, locker_links, __db_lock)) { - __lock_printlock(lt, lp, 1); - ret = EINVAL; - } - if (ret == 0) { - __lock_freeobj(lt, sh_locker); - lt->region->nlockers--; - } - break; -#endif - default: - ret = EINVAL; - break; - } - } - - if (lt->region->need_dd && lt->region->detect != DB_LOCK_NORUN) { - run_dd = 1; - lt->region->need_dd = 0; - } else - run_dd = 0; - - UNLOCK_LOCKREGION(lt); - - if (ret == 0 && run_dd) - lock_detect(lt, 0, lt->region->detect); - - if (elistp && ret != 0) - *elistp = &list[i - 1]; - return (ret); -} - -int -lock_get(lt, locker, flags, obj, lock_mode, lock) - DB_LOCKTAB *lt; - u_int32_t locker, flags; - const DBT *obj; - db_lockmode_t lock_mode; - DB_LOCK *lock; -{ - struct __db_lock *lockp; - int ret; - - LOCK_PANIC_CHECK(lt); - - /* Validate arguments. 
*/ - if ((ret = __db_fchk(lt->dbenv, - "lock_get", flags, DB_LOCK_NOWAIT | DB_LOCK_UPGRADE)) != 0) - return (ret); - - LOCK_LOCKREGION(lt); - - if ((ret = __lock_validate_region(lt)) == 0) { - if (LF_ISSET(DB_LOCK_UPGRADE)) - lockp = OFFSET_TO_LOCK(lt, *lock); - - if ((ret = __lock_get_internal(lt, - locker, NULL, flags, obj, lock_mode, &lockp)) == 0) { - if (!LF_ISSET(DB_LOCK_UPGRADE)) - *lock = LOCK_TO_OFFSET(lt, lockp); - lt->region->nrequests++; - } - } - - UNLOCK_LOCKREGION(lt); - return (ret); -} - -int -lock_tget(lt, txn, flags, obj, lock_mode, lock) - DB_LOCKTAB *lt; - DB_TXN *txn; - u_int32_t flags; - const DBT *obj; - db_lockmode_t lock_mode; - DB_LOCK *lock; -{ - struct __db_lock *lockp; - int ret; - - LOCK_PANIC_CHECK(lt); - - /* Validate arguments. */ - if ((ret = __db_fchk(lt->dbenv, - "lock_get", flags, DB_LOCK_NOWAIT | DB_LOCK_UPGRADE)) != 0) - return (ret); - - LOCK_LOCKREGION(lt); - - if ((ret = __lock_validate_region(lt)) == 0) { - if (LF_ISSET(DB_LOCK_UPGRADE)) - lockp = OFFSET_TO_LOCK(lt, *lock); - - if ((ret = __lock_get_internal(lt, - txn->txnid, txn, flags, obj, lock_mode, &lockp)) == 0) { - if (!LF_ISSET(DB_LOCK_UPGRADE)) - *lock = LOCK_TO_OFFSET(lt, lockp); - lt->region->nrequests++; - } - } - - UNLOCK_LOCKREGION(lt); - return (ret); -} -int -lock_put(lt, lock) - DB_LOCKTAB *lt; - DB_LOCK lock; -{ - struct __db_lock *lockp; - int ret, run_dd; - - LOCK_PANIC_CHECK(lt); - - LOCK_LOCKREGION(lt); - - if ((ret = __lock_validate_region(lt)) != 0) - return (ret); - else { - lockp = OFFSET_TO_LOCK(lt, lock); - ret = __lock_put_internal(lt, lockp, 0); - } - - __lock_checklocker(lt, lockp, 0); - - if (lt->region->need_dd && lt->region->detect != DB_LOCK_NORUN) { - run_dd = 1; - lt->region->need_dd = 0; - } else - run_dd = 0; - - UNLOCK_LOCKREGION(lt); - - if (ret == 0 && run_dd) - lock_detect(lt, 0, lt->region->detect); - - return (ret); -} - -static int -__lock_put_internal(lt, lockp, do_all) - DB_LOCKTAB *lt; - struct __db_lock *lockp; - int do_all; -{ - DB_LOCKOBJ *sh_obj; - int state_changed; - - if (lockp->refcount == 0 || (lockp->status != DB_LSTAT_HELD && - lockp->status != DB_LSTAT_WAITING) || lockp->obj == 0) { - __db_err(lt->dbenv, "lock_put: invalid lock %lu", - (u_long)((u_int8_t *)lockp - (u_int8_t *)lt->region)); - return (EINVAL); - } - - if (do_all) - lt->region->nreleases += lockp->refcount; - else - lt->region->nreleases++; - if (do_all == 0 && lockp->refcount > 1) { - lockp->refcount--; - return (0); - } - - /* Get the object associated with this lock. */ - sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj); - - /* Remove lock from locker list. */ - SH_LIST_REMOVE(lockp, locker_links, __db_lock); - - /* Remove this lock from its holders/waitlist. */ - if (lockp->status != DB_LSTAT_HELD) - __lock_remove_waiter(lt, sh_obj, lockp, DB_LSTAT_FREE); - else - SH_TAILQ_REMOVE(&sh_obj->holders, lockp, links, __db_lock); - - state_changed = __lock_promote(lt, sh_obj); - - /* Check if object should be reclaimed. */ - if (SH_TAILQ_FIRST(&sh_obj->holders, __db_lock) == NULL) { - HASHREMOVE_EL(lt->hashtab, __db_lockobj, - links, sh_obj, lt->region->table_size, __lock_lhash); - if (sh_obj->lockobj.size > sizeof(sh_obj->objdata)) - __db_shalloc_free(lt->mem, - SH_DBT_PTR(&sh_obj->lockobj)); - SH_TAILQ_INSERT_HEAD(<->region->free_objs, sh_obj, links, - __db_lockobj); - state_changed = 1; - } - - /* Free lock. 
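[Annotation] A detail worth noting in lock_get()/lock_put() above: the DB_LOCK handle returned to the caller is not a pointer but a byte offset into the shared lock region (the LOCK_TO_OFFSET / OFFSET_TO_LOCK macros), so it stays meaningful in every process, no matter where each one maps the region. A sketch of that conversion, with illustrative names:

#include <stddef.h>
#include <stdint.h>

struct lock;				/* stands in for struct __db_lock */

static size_t
lock_to_offset(void *region, struct lock *lp)
{
	return ((size_t)((uint8_t *)lp - (uint8_t *)region));
}

static struct lock *
offset_to_lock(void *region, size_t off)
{
	return ((struct lock *)(void *)((uint8_t *)region + off));
}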
*/ - lockp->status = DB_LSTAT_FREE; - SH_TAILQ_INSERT_HEAD(<->region->free_locks, lockp, links, __db_lock); - - /* - * If we did not promote anyone; we need to run the deadlock - * detector again. - */ - if (state_changed == 0) - lt->region->need_dd = 1; - - return (0); -} - -static int -__lock_get_internal(lt, locker, txn, flags, obj, lock_mode, lockp) - DB_LOCKTAB *lt; - u_int32_t locker, flags; - DB_TXN *txn; - const DBT *obj; - db_lockmode_t lock_mode; - struct __db_lock **lockp; -{ - struct __db_lock *newl, *lp; - DB_LOCKOBJ *sh_obj, *sh_locker; - DB_LOCKREGION *lrp; - size_t newl_off; - int ihold, no_dd, ret; - - no_dd = ret = 0; - - /* - * Check that lock mode is valid. - */ - lrp = lt->region; - if ((u_int32_t)lock_mode >= lrp->nmodes) { - __db_err(lt->dbenv, - "lock_get: invalid lock mode %lu\n", (u_long)lock_mode); - return (EINVAL); - } - - /* Allocate a new lock. Optimize for the common case of a grant. */ - if ((newl = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock)) == NULL) { - if ((ret = __lock_grow_region(lt, DB_LOCK_LOCK, 0)) != 0) - return (ret); - lrp = lt->region; - newl = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock); - } - newl_off = LOCK_TO_OFFSET(lt, newl); - - /* Optimize for common case of granting a lock. */ - SH_TAILQ_REMOVE(&lrp->free_locks, newl, links, __db_lock); - - newl->mode = lock_mode; - newl->status = DB_LSTAT_HELD; - newl->holder = locker; - newl->refcount = 1; - - if ((ret = __lock_getobj(lt, 0, obj, DB_LOCK_OBJTYPE, &sh_obj)) != 0) - return (ret); - - lrp = lt->region; /* getobj might have grown */ - newl = OFFSET_TO_LOCK(lt, newl_off); - - /* Now make new lock point to object */ - newl->obj = SH_PTR_TO_OFF(newl, sh_obj); - - /* - * Now we have a lock and an object and we need to see if we should - * grant the lock. We use a FIFO ordering so we can only grant a - * new lock if it does not conflict with anyone on the holders list - * OR anyone on the waiters list. The reason that we don't grant if - * there's a conflict is that this can lead to starvation (a writer - * waiting on a popularly read item will never be granted). The - * downside of this is that a waiting reader can prevent an upgrade - * from reader to writer, which is not uncommon. - * - * There is one exception to the no-conflict rule. If a lock is held - * by the requesting locker AND the new lock does not conflict with - * any other holders, then we grant the lock. The most common place - * this happens is when the holder has a WRITE lock and a READ lock - * request comes in for the same locker. If we do not grant the read - * lock, then we guarantee deadlock. - * - * In case of conflict, we put the new lock on the end of the waiters - * list, unless we are upgrading in which case the locker goes on the - * front of the list. - */ - ihold = 0; - for (lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock); - lp != NULL; - lp = SH_TAILQ_NEXT(lp, links, __db_lock)) { - if (locker == lp->holder || - __lock_is_parent(lp->holder, txn)) { - if (lp->mode == lock_mode && - lp->status == DB_LSTAT_HELD) { - if (LF_ISSET(DB_LOCK_UPGRADE)) - goto upgrade; - - /* - * Lock is held, so we can increment the - * reference count and return this lock. - */ - lp->refcount++; - *lockp = lp; - SH_TAILQ_INSERT_HEAD(&lrp->free_locks, - newl, links, __db_lock); - return (0); - } else - ihold = 1; - } else if (CONFLICTS(lt, lp->mode, lock_mode)) - break; - } - - /* - * If we are upgrading, then there are two scenarios. Either - * we had no conflicts, so we can do the upgrade. 
Or, there - * is a conflict and we should wait at the HEAD of the waiters - * list. - */ - if (LF_ISSET(DB_LOCK_UPGRADE)) { - if (lp == NULL) - goto upgrade; - - /* There was a conflict, wait. */ - SH_TAILQ_INSERT_HEAD(&sh_obj->waiters, newl, links, __db_lock); - goto wait; - } - - if (lp == NULL && !ihold) - for (lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock); - lp != NULL; - lp = SH_TAILQ_NEXT(lp, links, __db_lock)) { - if (CONFLICTS(lt, lp->mode, lock_mode) && - locker != lp->holder) - break; - } - if (lp == NULL) - SH_TAILQ_INSERT_TAIL(&sh_obj->holders, newl, links); - else if (!(flags & DB_LOCK_NOWAIT)) - SH_TAILQ_INSERT_TAIL(&sh_obj->waiters, newl, links); - else { - /* Free the lock and return an error. */ - newl->status = DB_LSTAT_FREE; - SH_TAILQ_INSERT_HEAD(&lrp->free_locks, newl, links, __db_lock); - return (DB_LOCK_NOTGRANTED); - } - - /* - * Now, insert the lock onto its locker's list. If the locker does - * not currently hold any locks, there's no reason to run a deadlock - * detector, save that information. - */ - if ((ret = - __lock_getobj(lt, locker, NULL, DB_LOCK_LOCKER, &sh_locker)) != 0) - return (ret); - no_dd = SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL; - - lrp = lt->region; - SH_LIST_INSERT_HEAD(&sh_locker->heldby, newl, locker_links, __db_lock); - - if (lp != NULL) { - /* - * This is really a blocker for the process, so initialize it - * set. That way the current process will block when it tries - * to get it and the waking process will release it. - */ -wait: (void)__db_mutex_init(&newl->mutex, - MUTEX_LOCK_OFFSET(lt->region, &newl->mutex)); - (void)__db_mutex_lock(&newl->mutex, lt->reginfo.fd); - - newl->status = DB_LSTAT_WAITING; - lrp->nconflicts++; - - /* - * We are about to wait; must release the region mutex. Then, - * when we wakeup, we need to reacquire the region mutex before - * continuing. - */ - if (lrp->detect == DB_LOCK_NORUN) - lt->region->need_dd = 1; - UNLOCK_LOCKREGION(lt); - - /* - * We are about to wait; before waiting, see if the deadlock - * detector should be run. - */ - if (lrp->detect != DB_LOCK_NORUN && !no_dd) - (void)lock_detect(lt, 0, lrp->detect); - - (void)__db_mutex_lock(&newl->mutex, lt->reginfo.fd); - - LOCK_LOCKREGION(lt); - if (newl->status != DB_LSTAT_PENDING) { - /* - * If this lock errored due to a deadlock, then - * we have waiters that require promotion. - */ - if (newl->status == DB_LSTAT_ABORTED) - (void)__lock_promote(lt, sh_obj); - /* Return to free list. */ - __lock_checklocker(lt, newl, 0); - SH_TAILQ_INSERT_HEAD(&lrp->free_locks, newl, links, - __db_lock); - switch (newl->status) { - case DB_LSTAT_ABORTED: - ret = DB_LOCK_DEADLOCK; - break; - case DB_LSTAT_NOGRANT: - ret = DB_LOCK_NOTGRANTED; - break; - default: - ret = EINVAL; - break; - } - newl->status = DB_LSTAT_FREE; - newl = NULL; - } else if (LF_ISSET(DB_LOCK_UPGRADE)) { - /* - * The lock that was just granted got put on the - * holders list. Since we're upgrading some other - * lock, we've got to remove it here. - */ - SH_TAILQ_REMOVE(&sh_obj->holders, - newl, links, __db_lock); - goto upgrade; - } else - newl->status = DB_LSTAT_HELD; - } - - *lockp = newl; - return (ret); - -upgrade: - /* - * This was an upgrade, so return the new lock to the free list and - * upgrade the mode. 
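[Annotation] The wait path above uses a self-blocking mutex as a wakeup channel: the waiter initializes the lock's mutex in the held state ("initialize it set"), releases the region, then blocks trying to acquire that mutex a second time; whoever later grants or aborts the lock unlocks it from __lock_promote() or __lock_remove_waiter(). With POSIX primitives the same handshake is most naturally a semaphore that starts at zero; a sketch of the idea, not the DB code:

#include <semaphore.h>

sem_t wakeup;			/* would live in shared memory in practice */

void
waiter_blocks(void)
{
	(void)sem_init(&wakeup, 1, 0);	/* starts unavailable: "set" */
	(void)sem_wait(&wakeup);	/* blocks until the grantor posts */
}

void
grantor_wakes(void)
{
	(void)sem_post(&wakeup);	/* the __db_mutex_unlock() analogue */
}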
- */ - (*lockp)->mode = lock_mode; - newl->status = DB_LSTAT_FREE; - SH_TAILQ_INSERT_HEAD(&lrp->free_locks, newl, links, __db_lock); - return (0); -} - -/* - * __lock_is_locked -- - * - * PUBLIC: int __lock_is_locked - * PUBLIC: __P((DB_LOCKTAB *, u_int32_t, DBT *, db_lockmode_t)); - */ -int -__lock_is_locked(lt, locker, dbt, mode) - DB_LOCKTAB *lt; - u_int32_t locker; - DBT *dbt; - db_lockmode_t mode; -{ - struct __db_lock *lp; - DB_LOCKOBJ *sh_obj; - DB_LOCKREGION *lrp; - - lrp = lt->region; - - /* Look up the object in the hash table. */ - HASHLOOKUP(lt->hashtab, __db_lockobj, links, - dbt, sh_obj, lrp->table_size, __lock_ohash, __lock_cmp); - if (sh_obj == NULL) - return (0); - - for (lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock); - lp != NULL; - lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock)) { - if (lp->holder == locker && lp->mode == mode) - return (1); - } - - return (0); -} - -/* - * __lock_printlock -- - * - * PUBLIC: void __lock_printlock __P((DB_LOCKTAB *, struct __db_lock *, int)); - */ -void -__lock_printlock(lt, lp, ispgno) - DB_LOCKTAB *lt; - struct __db_lock *lp; - int ispgno; -{ - DB_LOCKOBJ *lockobj; - db_pgno_t pgno; - size_t obj; - u_int8_t *ptr; - const char *mode, *status; - - switch (lp->mode) { - case DB_LOCK_IREAD: - mode = "IREAD"; - break; - case DB_LOCK_IWR: - mode = "IWR"; - break; - case DB_LOCK_IWRITE: - mode = "IWRITE"; - break; - case DB_LOCK_NG: - mode = "NG"; - break; - case DB_LOCK_READ: - mode = "READ"; - break; - case DB_LOCK_WRITE: - mode = "WRITE"; - break; - default: - mode = "UNKNOWN"; - break; - } - switch (lp->status) { - case DB_LSTAT_ABORTED: - status = "ABORT"; - break; - case DB_LSTAT_ERR: - status = "ERROR"; - break; - case DB_LSTAT_FREE: - status = "FREE"; - break; - case DB_LSTAT_HELD: - status = "HELD"; - break; - case DB_LSTAT_NOGRANT: - status = "NONE"; - break; - case DB_LSTAT_WAITING: - status = "WAIT"; - break; - case DB_LSTAT_PENDING: - status = "PENDING"; - break; - default: - status = "UNKNOWN"; - break; - } - printf("\t%lx\t%s\t%lu\t%s\t", - (u_long)lp->holder, mode, (u_long)lp->refcount, status); - - lockobj = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj); - ptr = SH_DBT_PTR(&lockobj->lockobj); - if (ispgno) { - /* Assume this is a DBT lock. */ - memcpy(&pgno, ptr, sizeof(db_pgno_t)); - printf("page %lu\n", (u_long)pgno); - } else { - obj = (u_int8_t *)lp + lp->obj - (u_int8_t *)lt->region; - printf("0x%lx ", (u_long)obj); - __db_pr(ptr, lockobj->lockobj.size); - printf("\n"); - } -} - -/* - * PUBLIC: int __lock_getobj __P((DB_LOCKTAB *, - * PUBLIC: u_int32_t, const DBT *, u_int32_t type, DB_LOCKOBJ **)); - */ -int -__lock_getobj(lt, locker, dbt, type, objp) - DB_LOCKTAB *lt; - u_int32_t locker, type; - const DBT *dbt; - DB_LOCKOBJ **objp; -{ - DB_LOCKREGION *lrp; - DB_LOCKOBJ *sh_obj; - u_int32_t obj_size; - int ret; - void *p, *src; - - lrp = lt->region; - - /* Look up the object in the hash table. */ - if (type == DB_LOCK_OBJTYPE) { - HASHLOOKUP(lt->hashtab, __db_lockobj, links, dbt, sh_obj, - lrp->table_size, __lock_ohash, __lock_cmp); - obj_size = dbt->size; - } else { - HASHLOOKUP(lt->hashtab, __db_lockobj, links, locker, - sh_obj, lrp->table_size, __lock_locker_hash, - __lock_locker_cmp); - obj_size = sizeof(locker); - } - - /* - * If we found the object, then we can just return it. If - * we didn't find the object, then we need to create it. - */ - if (sh_obj == NULL) { - /* Create new object and then insert it into hash table. 
*/ - if ((sh_obj = - SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj)) == NULL) { - if ((ret = __lock_grow_region(lt, DB_LOCK_OBJ, 0)) != 0) - return (ret); - lrp = lt->region; - sh_obj = SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj); - } - - /* - * If we can fit this object in the structure, do so instead - * of shalloc-ing space for it. - */ - if (obj_size <= sizeof(sh_obj->objdata)) - p = sh_obj->objdata; - else - if ((ret = - __db_shalloc(lt->mem, obj_size, 0, &p)) != 0) { - if ((ret = __lock_grow_region(lt, - DB_LOCK_MEM, obj_size)) != 0) - return (ret); - lrp = lt->region; - /* Reacquire the head of the list. */ - sh_obj = SH_TAILQ_FIRST(&lrp->free_objs, - __db_lockobj); - (void)__db_shalloc(lt->mem, obj_size, 0, &p); - } - - src = type == DB_LOCK_OBJTYPE ? dbt->data : (void *)&locker; - memcpy(p, src, obj_size); - - sh_obj->type = type; - SH_TAILQ_REMOVE(&lrp->free_objs, sh_obj, links, __db_lockobj); - - SH_TAILQ_INIT(&sh_obj->waiters); - if (type == DB_LOCK_LOCKER) - SH_LIST_INIT(&sh_obj->heldby); - else - SH_TAILQ_INIT(&sh_obj->holders); - sh_obj->lockobj.size = obj_size; - sh_obj->lockobj.off = SH_PTR_TO_OFF(&sh_obj->lockobj, p); - - HASHINSERT(lt->hashtab, - __db_lockobj, links, sh_obj, lrp->table_size, __lock_lhash); - - if (type == DB_LOCK_LOCKER) - lrp->nlockers++; - } - - *objp = sh_obj; - return (0); -} - -/* - * Any lock on the waitlist has a process waiting for it. Therefore, we - * can't return the lock to the freelist immediately. Instead, we can - * remove the lock from the list of waiters, set the status field of the - * lock, and then let the process waking up return the lock to the - * free list. - */ -static void -__lock_remove_waiter(lt, sh_obj, lockp, status) - DB_LOCKTAB *lt; - DB_LOCKOBJ *sh_obj; - struct __db_lock *lockp; - db_status_t status; -{ - SH_TAILQ_REMOVE(&sh_obj->waiters, lockp, links, __db_lock); - lockp->status = status; - - /* Wake whoever is waiting on this lock. */ - (void)__db_mutex_unlock(&lockp->mutex, lt->reginfo.fd); -} - -static void -__lock_checklocker(lt, lockp, do_remove) - DB_LOCKTAB *lt; - struct __db_lock *lockp; - int do_remove; -{ - DB_LOCKOBJ *sh_locker; - - if (do_remove) - SH_LIST_REMOVE(lockp, locker_links, __db_lock); - - /* if the locker list is NULL, free up the object. */ - if (__lock_getobj(lt, lockp->holder, NULL, DB_LOCK_LOCKER, &sh_locker) - == 0 && SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL) { - __lock_freeobj(lt, sh_locker); - lt->region->nlockers--; - } -} - -static void -__lock_freeobj(lt, obj) - DB_LOCKTAB *lt; - DB_LOCKOBJ *obj; -{ - HASHREMOVE_EL(lt->hashtab, - __db_lockobj, links, obj, lt->region->table_size, __lock_lhash); - if (obj->lockobj.size > sizeof(obj->objdata)) - __db_shalloc_free(lt->mem, SH_DBT_PTR(&obj->lockobj)); - SH_TAILQ_INSERT_HEAD(<->region->free_objs, obj, links, __db_lockobj); -} - -/* - * __lock_downgrade -- - * Used by the concurrent access product to downgrade write locks - * back to iwrite locks. - * - * PUBLIC: int __lock_downgrade __P((DB_LOCKTAB *, - * PUBLIC: DB_LOCK, db_lockmode_t, u_int32_t)); - */ -int -__lock_downgrade(lt, lock, new_mode, flags) - DB_LOCKTAB *lt; - DB_LOCK lock; - db_lockmode_t new_mode; - u_int32_t flags; -{ - struct __db_lock *lockp; - DB_LOCKOBJ *obj; - int ret; - - COMPQUIET(flags, 0); - LOCK_PANIC_CHECK(lt); - LOCK_LOCKREGION(lt); - - if ((ret = __lock_validate_region(lt)) == 0) { - lockp = OFFSET_TO_LOCK(lt, lock); - lockp->mode = new_mode; - - /* Get the object associated with this lock. 
*/ - obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj); - (void)__lock_promote(lt, obj); - ++lt->region->nreleases; - } - - UNLOCK_LOCKREGION(lt); - - return (ret); -} - -/* - * __lock_promote -- - * - * Look through the waiters and holders lists and decide which (if any) - * locks can be promoted. Promote any that are eligible. - */ -static int -__lock_promote(lt, obj) - DB_LOCKTAB *lt; - DB_LOCKOBJ *obj; -{ - struct __db_lock *lp_w, *lp_h, *next_waiter; - int state_changed, waiter_is_txn; - - /* - * We need to do lock promotion. We also need to determine if - * we're going to need to run the deadlock detector again. If - * we release locks, and there are waiters, but no one gets promoted, - * then we haven't fundamentally changed the lockmgr state, so - * we may still have a deadlock and we have to run again. However, - * if there were no waiters, or we actually promoted someone, then - * we are OK and we don't have to run it immediately. - * - * During promotion, we look for state changes so we can return - * this information to the caller. - */ - for (lp_w = SH_TAILQ_FIRST(&obj->waiters, __db_lock), - state_changed = lp_w == NULL; - lp_w != NULL; - lp_w = next_waiter) { - waiter_is_txn = TXN_IS_HOLDING(lp_w); - next_waiter = SH_TAILQ_NEXT(lp_w, links, __db_lock); - for (lp_h = SH_TAILQ_FIRST(&obj->holders, __db_lock); - lp_h != NULL; - lp_h = SH_TAILQ_NEXT(lp_h, links, __db_lock)) { - if (CONFLICTS(lt, lp_h->mode, lp_w->mode) && - lp_h->holder != lp_w->holder && - !(waiter_is_txn && - TXN_IS_HOLDING(lp_h) && - __txn_is_ancestor(lt->dbenv->tx_info, - lp_h->txnoff, lp_w->txnoff))) - break; - } - if (lp_h != NULL) /* Found a conflict. */ - break; - - /* No conflict, promote the waiting lock. */ - SH_TAILQ_REMOVE(&obj->waiters, lp_w, links, __db_lock); - lp_w->status = DB_LSTAT_PENDING; - SH_TAILQ_INSERT_TAIL(&obj->holders, lp_w, links); - - /* Wake up waiter. */ - (void)__db_mutex_unlock(&lp_w->mutex, lt->reginfo.fd); - state_changed = 1; - } - - return (state_changed); -} - -static int -__lock_is_parent(locker, txn) - u_int32_t locker; - DB_TXN *txn; -{ - DB_TXN *t; - - if (txn == NULL) - return (0); - - for (t = txn->parent; t != NULL; t = t->parent) - if (t->txnid == locker) - return (1); - - return (0); -} diff --git a/db2/lock/lock_conflict.c b/db2/lock/lock_conflict.c deleted file mode 100644 index 4be858af7a..0000000000 --- a/db2/lock/lock_conflict.c +++ /dev/null @@ -1,39 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996, 1997, 1998 - * Sleepycat Software. All rights reserved. - */ - -#include "config.h" - -#ifndef lint -static const char sccsid[] = "@(#)lock_conflict.c 10.4 (Sleepycat) 11/20/98"; -#endif /* not lint */ - -#ifndef NO_SYSTEM_INCLUDES -#include <sys/types.h> -#endif - -#include "db_int.h" - -/* - * The conflict arrays are set up such that the row is the lock you - * are holding and the column is the lock that is desired. - */ -const u_int8_t db_rw_conflicts[] = { - /* N R W */ - /* N */ 0, 0, 0, - /* R */ 0, 0, 1, - /* W */ 0, 1, 1 -}; - -const u_int8_t db_riw_conflicts[] = { - /* N S X IX IS SIX */ - /* N */ 0, 0, 0, 0, 0, 0, - /* S */ 0, 0, 1, 1, 0, 1, - /* X */ 1, 1, 1, 1, 1, 1, - /* IX */ 0, 1, 1, 0, 0, 0, - /* IS */ 0, 0, 1, 0, 0, 0, - /* SIX */ 0, 1, 1, 0, 0, 0 -}; diff --git a/db2/lock/lock_deadlock.c b/db2/lock/lock_deadlock.c deleted file mode 100644 index 8b2f91bc9e..0000000000 --- a/db2/lock/lock_deadlock.c +++ /dev/null @@ -1,502 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. 
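[Annotation] The matrices in lock_conflict.c are consumed by the CONFLICTS() test seen throughout lock.c: index with the held mode as the row and the requested mode as the column. A self-contained sketch over the read/write matrix above:

enum rw_mode { MODE_N, MODE_R, MODE_W, RW_MODES };	/* matrix order */

static const unsigned char rw_conflicts[RW_MODES * RW_MODES] = {
	/*       N  R  W */
	/* N */  0, 0, 0,
	/* R */  0, 0, 1,
	/* W */  0, 1, 1,
};

static int
conflicts(enum rw_mode held, enum rw_mode requested)
{
	return (rw_conflicts[held * RW_MODES + requested]);
}

/* conflicts(MODE_R, MODE_W) == 1: a write request waits behind a reader. */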
- * - * Copyright (c) 1996, 1997, 1998 - * Sleepycat Software. All rights reserved. - */ - -#include "config.h" - -#ifndef lint -static const char sccsid[] = "@(#)lock_deadlock.c 10.37 (Sleepycat) 10/4/98"; -#endif /* not lint */ - -#ifndef NO_SYSTEM_INCLUDES -#include <sys/types.h> - -#include <errno.h> -#include <string.h> -#endif - -#include "db_int.h" -#include "shqueue.h" -#include "db_shash.h" -#include "lock.h" -#include "common_ext.h" - -#define ISSET_MAP(M, N) (M[(N) / 32] & (1 << (N) % 32)) - -#define CLEAR_MAP(M, N) { \ - u_int32_t __i; \ - for (__i = 0; __i < (N); __i++) \ - M[__i] = 0; \ -} - -#define SET_MAP(M, B) (M[(B) / 32] |= (1 << ((B) % 32))) -#define CLR_MAP(M, B) (M[(B) / 32] &= ~(1 << ((B) % 32))) - -#define OR_MAP(D, S, N) { \ - u_int32_t __i; \ - for (__i = 0; __i < (N); __i++) \ - D[__i] |= S[__i]; \ -} -#define BAD_KILLID 0xffffffff - -typedef struct { - int valid; - u_int32_t id; - DB_LOCK last_lock; - db_pgno_t pgno; -} locker_info; - -static int __dd_abort __P((DB_ENV *, locker_info *)); -static int __dd_build - __P((DB_ENV *, u_int32_t **, u_int32_t *, locker_info **)); -static u_int32_t - *__dd_find __P((u_int32_t *, locker_info *, u_int32_t)); - -#ifdef DIAGNOSTIC -static void __dd_debug __P((DB_ENV *, locker_info *, u_int32_t *, u_int32_t)); -#endif - -int -lock_detect(lt, flags, atype) - DB_LOCKTAB *lt; - u_int32_t flags, atype; -{ - DB_ENV *dbenv; - locker_info *idmap; - u_int32_t *bitmap, *deadlock, i, killid, nentries, nlockers; - int do_pass, ret; - - LOCK_PANIC_CHECK(lt); - - /* Validate arguments. */ - if ((ret = - __db_fchk(lt->dbenv, "lock_detect", flags, DB_LOCK_CONFLICT)) != 0) - return (ret); - - /* Check if a detector run is necessary. */ - dbenv = lt->dbenv; - if (LF_ISSET(DB_LOCK_CONFLICT)) { - /* Make a pass every time a lock waits. */ - LOCK_LOCKREGION(lt); - do_pass = dbenv->lk_info->region->need_dd != 0; - UNLOCK_LOCKREGION(lt); - - if (!do_pass) - return (0); - } - - /* Build the waits-for bitmap. */ - if ((ret = __dd_build(dbenv, &bitmap, &nlockers, &idmap)) != 0) - return (ret); - - if (nlockers == 0) - return (0); -#ifdef DIAGNOSTIC - if (dbenv->db_verbose != 0) - __dd_debug(dbenv, idmap, bitmap, nlockers); -#endif - /* Find a deadlock. */ - deadlock = __dd_find(bitmap, idmap, nlockers); - nentries = ALIGN(nlockers, 32) / 32; - killid = BAD_KILLID; - if (deadlock != NULL) { - /* Kill someone. */ - switch (atype) { - case DB_LOCK_OLDEST: - /* - * Find the first bit set in the current - * array and then look for a lower tid in - * the array. - */ - for (i = 0; i < nlockers; i++) - if (ISSET_MAP(deadlock, i)) - killid = i; - - if (killid == BAD_KILLID) { - __db_err(dbenv, - "warning: could not find locker to abort"); - break; - } - - /* - * The oldest transaction has the lowest - * transaction id. - */ - for (i = killid + 1; i < nlockers; i++) - if (ISSET_MAP(deadlock, i) && - idmap[i].id < idmap[killid].id) - killid = i; - break; - case DB_LOCK_DEFAULT: - case DB_LOCK_RANDOM: - /* - * We are trying to calculate the id of the - * locker whose entry is indicated by deadlock. - */ - killid = (deadlock - bitmap) / nentries; - break; - case DB_LOCK_YOUNGEST: - /* - * Find the first bit set in the current - * array and then look for a lower tid in - * the array. - */ - for (i = 0; i < nlockers; i++) - if (ISSET_MAP(deadlock, i)) - killid = i; - - if (killid == BAD_KILLID) { - __db_err(dbenv, - "warning: could not find locker to abort"); - break; - } - /* - * The youngest transaction has the highest - * transaction id. 
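[Annotation] The map macros above pack one row of the waits-for matrix into an array of 32-bit words, one bit per locker. Their behavior in isolation (a sketch, parenthesized slightly more defensively than the originals):

#include <assert.h>

#define ISSET_MAP(M, N)	((M)[(N) / 32] & (1u << ((N) % 32)))
#define SET_MAP(M, B)	((M)[(B) / 32] |= (1u << ((B) % 32)))
#define CLR_MAP(M, B)	((M)[(B) / 32] &= ~(1u << ((B) % 32)))

int
main(void)
{
	unsigned int row[2] = { 0, 0 };	/* covers lockers 0..63 */

	SET_MAP(row, 40);		/* "this locker waits on locker 40" */
	assert(ISSET_MAP(row, 40));	/* word 1, bit 8 */
	CLR_MAP(row, 40);
	assert(!ISSET_MAP(row, 40));
	return (0);
}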
- */ - for (i = killid + 1; i < nlockers; i++) - if (ISSET_MAP(deadlock, i) && - idmap[i].id > idmap[killid].id) - killid = i; - break; - default: - killid = BAD_KILLID; - ret = EINVAL; - } - - /* Kill the locker with lockid idmap[killid]. */ - if (dbenv->db_verbose != 0 && killid != BAD_KILLID) - __db_err(dbenv, "Aborting locker %lx", - (u_long)idmap[killid].id); - - if (killid != BAD_KILLID && - (ret = __dd_abort(dbenv, &idmap[killid])) != 0) - __db_err(dbenv, - "warning: unable to abort locker %lx", - (u_long)idmap[killid].id); - } - __os_free(bitmap, 0); - __os_free(idmap, 0); - - return (ret); -} - -/* - * ======================================================================== - * Utilities - */ -static int -__dd_build(dbenv, bmp, nlockers, idmap) - DB_ENV *dbenv; - u_int32_t **bmp, *nlockers; - locker_info **idmap; -{ - struct __db_lock *lp; - DB_LOCKTAB *lt; - DB_LOCKOBJ *op, *lo, *lockerp; - u_int8_t *pptr; - locker_info *id_array; - u_int32_t *bitmap, count, *entryp, i, id, nentries, *tmpmap; - int is_first, ret; - - lt = dbenv->lk_info; - - /* - * We'll check how many lockers there are, add a few more in for - * good measure and then allocate all the structures. Then we'll - * verify that we have enough room when we go back in and get the - * mutex the second time. - */ - LOCK_LOCKREGION(lt); -retry: count = lt->region->nlockers; - lt->region->need_dd = 0; - UNLOCK_LOCKREGION(lt); - - if (count == 0) { - *nlockers = 0; - return (0); - } - - if (dbenv->db_verbose) - __db_err(dbenv, "%lu lockers", (u_long)count); - - count += 10; - nentries = ALIGN(count, 32) / 32; - /* - * Allocate enough space for a count by count bitmap matrix. - * - * XXX - * We can probably save the malloc's between iterations just - * reallocing if necessary because count grew by too much. - */ - if ((ret = __os_calloc((size_t)count, - sizeof(u_int32_t) * nentries, &bitmap)) != 0) - return (ret); - - if ((ret = __os_calloc(sizeof(u_int32_t), nentries, &tmpmap)) != 0) { - __os_free(bitmap, sizeof(u_int32_t) * nentries); - return (ret); - } - - if ((ret = - __os_calloc((size_t)count, sizeof(locker_info), &id_array)) != 0) { - __os_free(bitmap, count * sizeof(u_int32_t) * nentries); - __os_free(tmpmap, sizeof(u_int32_t) * nentries); - return (ret); - } - - /* - * Now go back in and actually fill in the matrix. - */ - LOCK_LOCKREGION(lt); - if (lt->region->nlockers > count) { - __os_free(bitmap, count * sizeof(u_int32_t) * nentries); - __os_free(tmpmap, sizeof(u_int32_t) * nentries); - __os_free(id_array, count * sizeof(locker_info)); - goto retry; - } - - /* - * First we go through and assign each locker a deadlock detector id. - * Note that we fill in the idmap in the next loop since that's the - * only place where we conveniently have both the deadlock id and the - * actual locker. - */ - for (id = 0, i = 0; i < lt->region->table_size; i++) - for (op = SH_TAILQ_FIRST(<->hashtab[i], __db_lockobj); - op != NULL; op = SH_TAILQ_NEXT(op, links, __db_lockobj)) - if (op->type == DB_LOCK_LOCKER) - op->dd_id = id++; - /* - * We go through the hash table and find each object. For each object, - * we traverse the waiters list and add an entry in the waitsfor matrix - * for each waiter/holder combination. 
- */ - for (i = 0; i < lt->region->table_size; i++) { - for (op = SH_TAILQ_FIRST(<->hashtab[i], __db_lockobj); - op != NULL; op = SH_TAILQ_NEXT(op, links, __db_lockobj)) { - if (op->type != DB_LOCK_OBJTYPE) - continue; - CLEAR_MAP(tmpmap, nentries); - - /* - * First we go through and create a bit map that - * represents all the holders of this object. - */ - for (lp = SH_TAILQ_FIRST(&op->holders, __db_lock); - lp != NULL; - lp = SH_TAILQ_NEXT(lp, links, __db_lock)) { - if (__lock_getobj(lt, lp->holder, - NULL, DB_LOCK_LOCKER, &lockerp) != 0) { - __db_err(dbenv, - "warning unable to find object"); - continue; - } - id_array[lockerp->dd_id].id = lp->holder; - id_array[lockerp->dd_id].valid = 1; - - /* - * If the holder has already been aborted, then - * we should ignore it for now. - */ - if (lp->status == DB_LSTAT_HELD) - SET_MAP(tmpmap, lockerp->dd_id); - } - - /* - * Next, for each waiter, we set its row in the matrix - * equal to the map of holders we set up above. - */ - for (is_first = 1, - lp = SH_TAILQ_FIRST(&op->waiters, __db_lock); - lp != NULL; - is_first = 0, - lp = SH_TAILQ_NEXT(lp, links, __db_lock)) { - if (__lock_getobj(lt, lp->holder, - NULL, DB_LOCK_LOCKER, &lockerp) != 0) { - __db_err(dbenv, - "warning unable to find object"); - continue; - } - id_array[lockerp->dd_id].id = lp->holder; - id_array[lockerp->dd_id].valid = 1; - - /* - * If the transaction is pending abortion, then - * ignore it on this iteration. - */ - if (lp->status != DB_LSTAT_WAITING) - continue; - - entryp = bitmap + (nentries * lockerp->dd_id); - OR_MAP(entryp, tmpmap, nentries); - /* - * If this is the first waiter on the queue, - * then we remove the waitsfor relationship - * with oneself. However, if it's anywhere - * else on the queue, then we have to keep - * it and we have an automatic deadlock. - */ - if (is_first) - CLR_MAP(entryp, lockerp->dd_id); - } - } - } - - /* Now for each locker; record its last lock. */ - for (id = 0; id < count; id++) { - if (!id_array[id].valid) - continue; - if (__lock_getobj(lt, - id_array[id].id, NULL, DB_LOCK_LOCKER, &lockerp) != 0) { - __db_err(dbenv, - "No locks for locker %lu", (u_long)id_array[id].id); - continue; - } - lp = SH_LIST_FIRST(&lockerp->heldby, __db_lock); - if (lp != NULL) { - id_array[id].last_lock = LOCK_TO_OFFSET(lt, lp); - lo = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj); - pptr = SH_DBT_PTR(&lo->lockobj); - if (lo->lockobj.size >= sizeof(db_pgno_t)) - memcpy(&id_array[id].pgno, pptr, - sizeof(db_pgno_t)); - else - id_array[id].pgno = 0; - } - } - - /* Pass complete, reset the deadlock detector bit. */ - lt->region->need_dd = 0; - UNLOCK_LOCKREGION(lt); - - /* - * Now we can release everything except the bitmap matrix that we - * created. - */ - *nlockers = id; - *idmap = id_array; - *bmp = bitmap; - __os_free(tmpmap, sizeof(u_int32_t) * nentries); - return (0); -} - -static u_int32_t * -__dd_find(bmp, idmap, nlockers) - u_int32_t *bmp, nlockers; - locker_info *idmap; -{ - u_int32_t i, j, nentries, *mymap, *tmpmap; - - /* - * For each locker, OR in the bits from the lockers on which that - * locker is waiting. - */ - nentries = ALIGN(nlockers, 32) / 32; - for (mymap = bmp, i = 0; i < nlockers; i++, mymap += nentries) { - if (!idmap[i].valid) - continue; - for (j = 0; j < nlockers; j++) { - if (ISSET_MAP(mymap, j)) { - /* Find the map for this bit. 
*/ - tmpmap = bmp + (nentries * j); - OR_MAP(mymap, tmpmap, nentries); - if (ISSET_MAP(mymap, i)) - return (mymap); - } - } - } - return (NULL); -} - -static int -__dd_abort(dbenv, info) - DB_ENV *dbenv; - locker_info *info; -{ - struct __db_lock *lockp; - DB_LOCKTAB *lt; - DB_LOCKOBJ *lockerp, *sh_obj; - int ret; - - lt = dbenv->lk_info; - LOCK_LOCKREGION(lt); - - /* Find the locker's last lock. */ - if ((ret = - __lock_getobj(lt, info->id, NULL, DB_LOCK_LOCKER, &lockerp)) != 0) - goto out; - - lockp = SH_LIST_FIRST(&lockerp->heldby, __db_lock); - - /* - * It's possible that this locker was already aborted. - * If that's the case, make sure that we remove its - * locker from the hash table. - */ - if (lockp == NULL) { - HASHREMOVE_EL(lt->hashtab, __db_lockobj, - links, lockerp, lt->region->table_size, __lock_lhash); - SH_TAILQ_INSERT_HEAD(<->region->free_objs, - lockerp, links, __db_lockobj); - lt->region->nlockers--; - goto out; - } else if (LOCK_TO_OFFSET(lt, lockp) != info->last_lock || - lockp->status != DB_LSTAT_WAITING) - goto out; - - /* Abort lock, take it off list, and wake up this lock. */ - lockp->status = DB_LSTAT_ABORTED; - lt->region->ndeadlocks++; - SH_LIST_REMOVE(lockp, locker_links, __db_lock); - sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj); - SH_TAILQ_REMOVE(&sh_obj->waiters, lockp, links, __db_lock); - (void)__db_mutex_unlock(&lockp->mutex, lt->reginfo.fd); - - ret = 0; - -out: UNLOCK_LOCKREGION(lt); - return (ret); -} - -#ifdef DIAGNOSTIC -static void -__dd_debug(dbenv, idmap, bitmap, nlockers) - DB_ENV *dbenv; - locker_info *idmap; - u_int32_t *bitmap, nlockers; -{ - u_int32_t i, j, *mymap, nentries; - int ret; - char *msgbuf; - - __db_err(dbenv, "Waitsfor array"); - __db_err(dbenv, "waiter\twaiting on"); - - /* Allocate space to print 10 bytes per item waited on. */ -#undef MSGBUF_LEN -#define MSGBUF_LEN ((nlockers + 1) * 10 + 64) - if ((ret = __os_malloc(MSGBUF_LEN, NULL, &msgbuf)) != 0) - return; - - nentries = ALIGN(nlockers, 32) / 32; - for (mymap = bitmap, i = 0; i < nlockers; i++, mymap += nentries) { - if (!idmap[i].valid) - continue; - sprintf(msgbuf, /* Waiter. */ - "%lx/%lu:\t", (u_long)idmap[i].id, (u_long)idmap[i].pgno); - for (j = 0; j < nlockers; j++) - if (ISSET_MAP(mymap, j)) - sprintf(msgbuf, "%s %lx", msgbuf, - (u_long)idmap[j].id); - (void)sprintf(msgbuf, - "%s %lu", msgbuf, (u_long)idmap[i].last_lock); - __db_err(dbenv, msgbuf); - } - - __os_free(msgbuf, MSGBUF_LEN); -} -#endif diff --git a/db2/lock/lock_region.c b/db2/lock/lock_region.c deleted file mode 100644 index 613a6cefb2..0000000000 --- a/db2/lock/lock_region.c +++ /dev/null @@ -1,743 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996, 1997, 1998 - * Sleepycat Software. All rights reserved. 
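[Annotation] __dd_find() above looks for a cycle by OR-ing, in place over the packed rows, the row of every locker j that locker i waits on into i's own row, and declaring deadlock the moment a row acquires its own bit (i can reach itself). The fully worked-out form of that idea is a boolean Warshall closure; a compact sketch on an unpacked matrix:

#define MAXL 8				/* small, for illustration */

/*
 * wf[i][j] != 0 means locker i waits on locker j; closes wf in place.
 * Returns a locker on a cycle, or -1 if the graph is deadlock-free.
 */
static int
find_deadlocked_locker(int n, unsigned char wf[MAXL][MAXL])
{
	int i, j, k;

	for (k = 0; k < n; k++)		/* Warshall: close over k */
		for (i = 0; i < n; i++)
			if (wf[i][k])
				for (j = 0; j < n; j++)
					wf[i][j] |= wf[k][j];
	for (i = 0; i < n; i++)
		if (wf[i][i])		/* i reaches itself: a cycle */
			return (i);
	return (-1);
}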
- */ - -#include "config.h" - -#ifndef lint -static const char sccsid[] = "@(#)lock_region.c 10.21 (Sleepycat) 10/19/98"; -#endif /* not lint */ - -#ifndef NO_SYSTEM_INCLUDES -#include <sys/types.h> - -#include <ctype.h> -#include <errno.h> -#include <string.h> -#endif - -#include "db_int.h" -#include "shqueue.h" -#include "db_shash.h" -#include "lock.h" -#include "common_ext.h" - -static u_int32_t __lock_count_locks __P((DB_LOCKREGION *)); -static u_int32_t __lock_count_objs __P((DB_LOCKREGION *)); -static void __lock_dump_locker __P((DB_LOCKTAB *, DB_LOCKOBJ *, FILE *)); -static void __lock_dump_object __P((DB_LOCKTAB *, DB_LOCKOBJ *, FILE *)); -static const char * - __lock_dump_status __P((db_status_t)); -static void __lock_reset_region __P((DB_LOCKTAB *)); -static int __lock_tabinit __P((DB_ENV *, DB_LOCKREGION *)); - -int -lock_open(path, flags, mode, dbenv, ltp) - const char *path; - u_int32_t flags; - int mode; - DB_ENV *dbenv; - DB_LOCKTAB **ltp; -{ - DB_LOCKTAB *lt; - u_int32_t lock_modes, maxlocks, regflags; - int ret; - - /* Validate arguments. */ -#ifdef HAVE_SPINLOCKS -#define OKFLAGS (DB_CREATE | DB_THREAD) -#else -#define OKFLAGS (DB_CREATE) -#endif - if ((ret = __db_fchk(dbenv, "lock_open", flags, OKFLAGS)) != 0) - return (ret); - - /* Create the lock table structure. */ - if ((ret = __os_calloc(1, sizeof(DB_LOCKTAB), <)) != 0) - return (ret); - lt->dbenv = dbenv; - - /* Grab the values that we need to compute the region size. */ - lock_modes = DB_LOCK_RW_N; - maxlocks = DB_LOCK_DEFAULT_N; - regflags = REGION_SIZEDEF; - if (dbenv != NULL) { - if (dbenv->lk_modes != 0) { - lock_modes = dbenv->lk_modes; - regflags = 0; - } - if (dbenv->lk_max != 0) { - maxlocks = dbenv->lk_max; - regflags = 0; - } - } - - /* Join/create the lock region. */ - lt->reginfo.dbenv = dbenv; - lt->reginfo.appname = DB_APP_NONE; - if (path == NULL) - lt->reginfo.path = NULL; - else - if ((ret = __os_strdup(path, <->reginfo.path)) != 0) - goto err; - lt->reginfo.file = DB_DEFAULT_LOCK_FILE; - lt->reginfo.mode = mode; - lt->reginfo.size = - LOCK_REGION_SIZE(lock_modes, maxlocks, __db_tablesize(maxlocks)); - lt->reginfo.dbflags = flags; - lt->reginfo.addr = NULL; - lt->reginfo.fd = -1; - lt->reginfo.flags = regflags; - - if ((ret = __db_rattach(<->reginfo)) != 0) - goto err; - - /* Now set up the pointer to the region. */ - lt->region = lt->reginfo.addr; - - /* Initialize the region if we created it. */ - if (F_ISSET(<->reginfo, REGION_CREATED)) { - lt->region->maxlocks = maxlocks; - lt->region->nmodes = lock_modes; - if ((ret = __lock_tabinit(dbenv, lt->region)) != 0) - goto err; - } else { - /* Check for an unexpected region. */ - if (lt->region->magic != DB_LOCKMAGIC) { - __db_err(dbenv, - "lock_open: %s: bad magic number", path); - ret = EINVAL; - goto err; - } - } - - /* Check for automatic deadlock detection. */ - if (dbenv != NULL && dbenv->lk_detect != DB_LOCK_NORUN) { - if (lt->region->detect != DB_LOCK_NORUN && - dbenv->lk_detect != DB_LOCK_DEFAULT && - lt->region->detect != dbenv->lk_detect) { - __db_err(dbenv, - "lock_open: incompatible deadlock detector mode"); - ret = EINVAL; - goto err; - } - if (lt->region->detect == DB_LOCK_NORUN) - lt->region->detect = dbenv->lk_detect; - } - - /* Set up remaining pointers into region. 
*/ - lt->conflicts = (u_int8_t *)lt->region + sizeof(DB_LOCKREGION); - lt->hashtab = - (DB_HASHTAB *)((u_int8_t *)lt->region + lt->region->hash_off); - lt->mem = (void *)((u_int8_t *)lt->region + lt->region->mem_off); - - UNLOCK_LOCKREGION(lt); - *ltp = lt; - return (0); - -err: if (lt->reginfo.addr != NULL) { - UNLOCK_LOCKREGION(lt); - (void)__db_rdetach(<->reginfo); - if (F_ISSET(<->reginfo, REGION_CREATED)) - (void)lock_unlink(path, 1, dbenv); - } - - if (lt->reginfo.path != NULL) - __os_freestr(lt->reginfo.path); - __os_free(lt, sizeof(*lt)); - return (ret); -} - -/* - * __lock_panic -- - * Panic a lock region. - * - * PUBLIC: void __lock_panic __P((DB_ENV *)); - */ -void -__lock_panic(dbenv) - DB_ENV *dbenv; -{ - if (dbenv->lk_info != NULL) - dbenv->lk_info->region->hdr.panic = 1; -} - - -/* - * __lock_tabinit -- - * Initialize the lock region. - */ -static int -__lock_tabinit(dbenv, lrp) - DB_ENV *dbenv; - DB_LOCKREGION *lrp; -{ - struct __db_lock *lp; - struct lock_header *tq_head; - struct obj_header *obj_head; - DB_LOCKOBJ *op; - u_int32_t i, nelements; - const u_int8_t *conflicts; - u_int8_t *curaddr; - - conflicts = dbenv == NULL || dbenv->lk_conflicts == NULL ? - db_rw_conflicts : dbenv->lk_conflicts; - - lrp->table_size = __db_tablesize(lrp->maxlocks); - lrp->magic = DB_LOCKMAGIC; - lrp->version = DB_LOCKVERSION; - lrp->id = 0; - /* - * These fields (lrp->maxlocks, lrp->nmodes) are initialized - * in the caller, since we had to grab those values to size - * the region. - */ - lrp->need_dd = 0; - lrp->detect = DB_LOCK_NORUN; - lrp->numobjs = lrp->maxlocks; - lrp->nlockers = 0; - lrp->mem_bytes = ALIGN(STRING_SIZE(lrp->maxlocks), sizeof(size_t)); - lrp->increment = lrp->hdr.size / 2; - lrp->nconflicts = 0; - lrp->nrequests = 0; - lrp->nreleases = 0; - lrp->ndeadlocks = 0; - - /* - * As we write the region, we've got to maintain the alignment - * for the structures that follow each chunk. This information - * ends up being encapsulated both in here as well as in the - * lock.h file for the XXX_SIZE macros. - */ - /* Initialize conflict matrix. */ - curaddr = (u_int8_t *)lrp + sizeof(DB_LOCKREGION); - memcpy(curaddr, conflicts, lrp->nmodes * lrp->nmodes); - curaddr += lrp->nmodes * lrp->nmodes; - - /* - * Initialize hash table. - */ - curaddr = (u_int8_t *)ALIGNP(curaddr, LOCK_HASH_ALIGN); - lrp->hash_off = curaddr - (u_int8_t *)lrp; - nelements = lrp->table_size; - __db_hashinit(curaddr, nelements); - curaddr += nelements * sizeof(DB_HASHTAB); - - /* - * Initialize locks onto a free list. Since locks contains mutexes, - * we need to make sure that each lock is aligned on a MUTEX_ALIGNMENT - * boundary. - */ - curaddr = (u_int8_t *)ALIGNP(curaddr, MUTEX_ALIGNMENT); - tq_head = &lrp->free_locks; - SH_TAILQ_INIT(tq_head); - - for (i = 0; i++ < lrp->maxlocks; - curaddr += ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT)) { - lp = (struct __db_lock *)curaddr; - lp->status = DB_LSTAT_FREE; - SH_TAILQ_INSERT_HEAD(tq_head, lp, links, __db_lock); - } - - /* Initialize objects onto a free list. */ - obj_head = &lrp->free_objs; - SH_TAILQ_INIT(obj_head); - - for (i = 0; i++ < lrp->maxlocks; curaddr += sizeof(DB_LOCKOBJ)) { - op = (DB_LOCKOBJ *)curaddr; - SH_TAILQ_INSERT_HEAD(obj_head, op, links, __db_lockobj); - } - - /* - * Initialize the string space; as for all shared memory allocation - * regions, this requires size_t alignment, since we store the - * lengths of malloc'd areas in the area. 
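[Annotation] All of the layout arithmetic in __lock_tabinit() hinges on rounding addresses up to the alignment of the next chunk (MUTEX_ALIGNMENT for the lock array, size_t for the shalloc arena). The ALIGN/ALIGNP macros are, in effect, the standard power-of-two rounding trick; a sketch:

#include <stdint.h>

/* Round v (an integer or pointer) up to a multiple of bound, a power of two. */
#define ALIGN_UP(v, bound) \
	(((uintptr_t)(v) + (bound) - 1) & ~((uintptr_t)(bound) - 1))

/* Examples: ALIGN_UP(13, 8) == 16; ALIGN_UP(16, 8) == 16. */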
- */ - curaddr = (u_int8_t *)ALIGNP(curaddr, sizeof(size_t)); - lrp->mem_off = curaddr - (u_int8_t *)lrp; - __db_shalloc_init(curaddr, lrp->mem_bytes); - return (0); -} - -int -lock_close(lt) - DB_LOCKTAB *lt; -{ - int ret; - - LOCK_PANIC_CHECK(lt); - - if ((ret = __db_rdetach(<->reginfo)) != 0) - return (ret); - - if (lt->reginfo.path != NULL) - __os_freestr(lt->reginfo.path); - __os_free(lt, sizeof(*lt)); - - return (0); -} - -int -lock_unlink(path, force, dbenv) - const char *path; - int force; - DB_ENV *dbenv; -{ - REGINFO reginfo; - int ret; - - memset(®info, 0, sizeof(reginfo)); - reginfo.dbenv = dbenv; - reginfo.appname = DB_APP_NONE; - if (path != NULL && (ret = __os_strdup(path, ®info.path)) != 0) - return (ret); - reginfo.file = DB_DEFAULT_LOCK_FILE; - ret = __db_runlink(®info, force); - if (reginfo.path != NULL) - __os_freestr(reginfo.path); - return (ret); -} - -/* - * __lock_validate_region -- - * Called at every interface to verify if the region has changed size, - * and if so, to remap the region in and reset the process' pointers. - * - * PUBLIC: int __lock_validate_region __P((DB_LOCKTAB *)); - */ -int -__lock_validate_region(lt) - DB_LOCKTAB *lt; -{ - int ret; - - if (lt->reginfo.size == lt->region->hdr.size) - return (0); - - /* Detach/reattach the region. */ - if ((ret = __db_rreattach(<->reginfo, lt->region->hdr.size)) != 0) - return (ret); - - /* Reset region information. */ - lt->region = lt->reginfo.addr; - __lock_reset_region(lt); - - return (0); -} - -/* - * __lock_grow_region -- - * We have run out of space; time to grow the region. - * - * PUBLIC: int __lock_grow_region __P((DB_LOCKTAB *, int, size_t)); - */ -int -__lock_grow_region(lt, which, howmuch) - DB_LOCKTAB *lt; - int which; - size_t howmuch; -{ - struct __db_lock *newl; - struct lock_header *lock_head; - struct obj_header *obj_head; - DB_LOCKOBJ *op; - DB_LOCKREGION *lrp; - float lock_ratio, obj_ratio; - size_t incr, oldsize, used, usedmem; - u_int32_t i, newlocks, newmem, newobjs, usedlocks, usedobjs; - u_int8_t *curaddr; - int ret; - - lrp = lt->region; - oldsize = lrp->hdr.size; - incr = lrp->increment; - - /* Figure out how much of each sort of space we have. */ - usedmem = lrp->mem_bytes - __db_shalloc_count(lt->mem); - usedobjs = lrp->numobjs - __lock_count_objs(lrp); - usedlocks = lrp->maxlocks - __lock_count_locks(lrp); - - /* - * Figure out what fraction of the used space belongs to each - * different type of "thing" in the region. Then partition the - * new space up according to this ratio. - */ - used = usedmem + - usedlocks * ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT) + - usedobjs * sizeof(DB_LOCKOBJ); - - lock_ratio = usedlocks * - ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT) / (float)used; - obj_ratio = usedobjs * sizeof(DB_LOCKOBJ) / (float)used; - - newlocks = (u_int32_t)(lock_ratio * - incr / ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT)); - newobjs = (u_int32_t)(obj_ratio * incr / sizeof(DB_LOCKOBJ)); - newmem = incr - - (newobjs * sizeof(DB_LOCKOBJ) + - newlocks * ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT)); - - /* - * Make sure we allocate enough memory for the object being - * requested. 
- */ - switch (which) { - case DB_LOCK_LOCK: - if (newlocks == 0) { - newlocks = 10; - incr += newlocks * sizeof(struct __db_lock); - } - break; - case DB_LOCK_OBJ: - if (newobjs == 0) { - newobjs = 10; - incr += newobjs * sizeof(DB_LOCKOBJ); - } - break; - case DB_LOCK_MEM: - if (newmem < howmuch * 2) { - incr += howmuch * 2 - newmem; - newmem = howmuch * 2; - } - break; - } - - newmem += ALIGN(incr, sizeof(size_t)) - incr; - incr = ALIGN(incr, sizeof(size_t)); - - /* - * Since we are going to be allocating locks at the beginning of the - * new chunk, we need to make sure that the chunk is MUTEX_ALIGNMENT - * aligned. We did not guarantee this when we created the region, so - * we may need to pad the old region by extra bytes to ensure this - * alignment. - */ - incr += ALIGN(oldsize, MUTEX_ALIGNMENT) - oldsize; - - __db_err(lt->dbenv, - "Growing lock region: %lu locks %lu objs %lu bytes", - (u_long)newlocks, (u_long)newobjs, (u_long)newmem); - - if ((ret = __db_rgrow(<->reginfo, oldsize + incr)) != 0) - return (ret); - lt->region = lt->reginfo.addr; - __lock_reset_region(lt); - - /* Update region parameters. */ - lrp = lt->region; - lrp->increment = incr << 1; - lrp->maxlocks += newlocks; - lrp->numobjs += newobjs; - lrp->mem_bytes += newmem; - - curaddr = (u_int8_t *)lrp + oldsize; - curaddr = (u_int8_t *)ALIGNP(curaddr, MUTEX_ALIGNMENT); - - /* Put new locks onto the free list. */ - lock_head = &lrp->free_locks; - for (i = 0; i++ < newlocks; - curaddr += ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT)) { - newl = (struct __db_lock *)curaddr; - SH_TAILQ_INSERT_HEAD(lock_head, newl, links, __db_lock); - } - - /* Put new objects onto the free list. */ - obj_head = &lrp->free_objs; - for (i = 0; i++ < newobjs; curaddr += sizeof(DB_LOCKOBJ)) { - op = (DB_LOCKOBJ *)curaddr; - SH_TAILQ_INSERT_HEAD(obj_head, op, links, __db_lockobj); - } - - *((size_t *)curaddr) = newmem - sizeof(size_t); - curaddr += sizeof(size_t); - __db_shalloc_free(lt->mem, curaddr); - - return (0); -} - -static void -__lock_reset_region(lt) - DB_LOCKTAB *lt; -{ - lt->conflicts = (u_int8_t *)lt->region + sizeof(DB_LOCKREGION); - lt->hashtab = - (DB_HASHTAB *)((u_int8_t *)lt->region + lt->region->hash_off); - lt->mem = (void *)((u_int8_t *)lt->region + lt->region->mem_off); -} - -/* - * lock_stat -- - * Return LOCK statistics. - */ -int -lock_stat(lt, gspp, db_malloc) - DB_LOCKTAB *lt; - DB_LOCK_STAT **gspp; - void *(*db_malloc) __P((size_t)); -{ - DB_LOCKREGION *rp; - int ret; - - *gspp = NULL; - - LOCK_PANIC_CHECK(lt); - - if ((ret = __os_malloc(sizeof(**gspp), db_malloc, gspp)) != 0) - return (ret); - - /* Copy out the global statistics. 
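[Annotation] lock_stat() above allocates the statistics structure on the caller's behalf, through db_malloc if one is supplied, and the caller owns the result; the fields are copied out of the region below. A hedged usage sketch:

#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <db.h>

void
print_lock_stats(DB_LOCKTAB *lt)
{
	DB_LOCK_STAT *sp;

	if (lock_stat(lt, &sp, malloc) != 0)	/* allocated via our malloc */
		return;
	printf("requests %lu releases %lu deadlocks %lu\n",
	    (u_long)sp->st_nrequests, (u_long)sp->st_nreleases,
	    (u_long)sp->st_ndeadlocks);
	free(sp);				/* caller frees the copy */
}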
*/ - LOCK_LOCKREGION(lt); - - rp = lt->region; - (*gspp)->st_magic = rp->magic; - (*gspp)->st_version = rp->version; - (*gspp)->st_maxlocks = rp->maxlocks; - (*gspp)->st_nmodes = rp->nmodes; - (*gspp)->st_numobjs = rp->numobjs; - (*gspp)->st_nlockers = rp->nlockers; - (*gspp)->st_nconflicts = rp->nconflicts; - (*gspp)->st_nrequests = rp->nrequests; - (*gspp)->st_nreleases = rp->nreleases; - (*gspp)->st_ndeadlocks = rp->ndeadlocks; - (*gspp)->st_region_nowait = rp->hdr.lock.mutex_set_nowait; - (*gspp)->st_region_wait = rp->hdr.lock.mutex_set_wait; - (*gspp)->st_refcnt = rp->hdr.refcnt; - (*gspp)->st_regsize = rp->hdr.size; - - UNLOCK_LOCKREGION(lt); - - return (0); -} - -static u_int32_t -__lock_count_locks(lrp) - DB_LOCKREGION *lrp; -{ - struct __db_lock *newl; - u_int32_t count; - - count = 0; - for (newl = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock); - newl != NULL; - newl = SH_TAILQ_NEXT(newl, links, __db_lock)) - count++; - - return (count); -} - -static u_int32_t -__lock_count_objs(lrp) - DB_LOCKREGION *lrp; -{ - DB_LOCKOBJ *obj; - u_int32_t count; - - count = 0; - for (obj = SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj); - obj != NULL; - obj = SH_TAILQ_NEXT(obj, links, __db_lockobj)) - count++; - - return (count); -} - -#define LOCK_DUMP_CONF 0x001 /* Conflict matrix. */ -#define LOCK_DUMP_FREE 0x002 /* Display lock free list. */ -#define LOCK_DUMP_LOCKERS 0x004 /* Display lockers. */ -#define LOCK_DUMP_MEM 0x008 /* Display region memory. */ -#define LOCK_DUMP_OBJECTS 0x010 /* Display objects. */ -#define LOCK_DUMP_ALL 0x01f /* Display all. */ - -/* - * __lock_dump_region -- - * - * PUBLIC: void __lock_dump_region __P((DB_LOCKTAB *, char *, FILE *)); - */ -void -__lock_dump_region(lt, area, fp) - DB_LOCKTAB *lt; - char *area; - FILE *fp; -{ - struct __db_lock *lp; - DB_LOCKOBJ *op; - DB_LOCKREGION *lrp; - u_int32_t flags, i, j; - int label; - - /* Make it easy to call from the debugger. 
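[Annotation] __lock_dump_region() decodes its area argument one letter at a time below ('c' conflicts, 'f' free lists, 'l' lockers, 'm' memory, 'o' objects, 'A' everything) and falls back to stderr when fp is NULL, which is what makes it convenient from a debugger. A usage sketch:

/* e.g., from gdb:
 *	call __lock_dump_region(lt, "lo", 0)	lockers and objects
 *	call __lock_dump_region(lt, "A", 0)	everything, to stderr
 */
__lock_dump_region(lt, "lo", NULL);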
*/ - if (fp == NULL) - fp = stderr; - - for (flags = 0; *area != '\0'; ++area) - switch (*area) { - case 'A': - LF_SET(LOCK_DUMP_ALL); - break; - case 'c': - LF_SET(LOCK_DUMP_CONF); - break; - case 'f': - LF_SET(LOCK_DUMP_FREE); - break; - case 'l': - LF_SET(LOCK_DUMP_LOCKERS); - break; - case 'm': - LF_SET(LOCK_DUMP_MEM); - break; - case 'o': - LF_SET(LOCK_DUMP_OBJECTS); - break; - } - - lrp = lt->region; - - fprintf(fp, "%s\nLock region parameters\n", DB_LINE); - fprintf(fp, "%s: %lu, %s: %lu, %s: %lu, %s: %lu\n%s: %lu, %s: %lu\n", - "table size", (u_long)lrp->table_size, - "hash_off", (u_long)lrp->hash_off, - "increment", (u_long)lrp->increment, - "mem_off", (u_long)lrp->mem_off, - "mem_bytes", (u_long)lrp->mem_bytes, - "need_dd", (u_long)lrp->need_dd); - - if (LF_ISSET(LOCK_DUMP_CONF)) { - fprintf(fp, "\n%s\nConflict matrix\n", DB_LINE); - for (i = 0; i < lrp->nmodes; i++) { - for (j = 0; j < lrp->nmodes; j++) - fprintf(fp, "%lu\t", - (u_long)lt->conflicts[i * lrp->nmodes + j]); - fprintf(fp, "\n"); - } - } - - if (LF_ISSET(LOCK_DUMP_LOCKERS | LOCK_DUMP_OBJECTS)) { - fprintf(fp, "%s\nLock hash buckets\n", DB_LINE); - for (i = 0; i < lrp->table_size; i++) { - label = 1; - for (op = SH_TAILQ_FIRST(<->hashtab[i], __db_lockobj); - op != NULL; - op = SH_TAILQ_NEXT(op, links, __db_lockobj)) { - if (LF_ISSET(LOCK_DUMP_LOCKERS) && - op->type == DB_LOCK_LOCKER) { - if (label) { - fprintf(fp, - "Bucket %lu:\n", (u_long)i); - label = 0; - } - __lock_dump_locker(lt, op, fp); - } - if (LF_ISSET(LOCK_DUMP_OBJECTS) && - op->type == DB_LOCK_OBJTYPE) { - if (label) { - fprintf(fp, - "Bucket %lu:\n", (u_long)i); - label = 0; - } - __lock_dump_object(lt, op, fp); - } - } - } - } - - if (LF_ISSET(LOCK_DUMP_FREE)) { - fprintf(fp, "%s\nLock free list\n", DB_LINE); - for (lp = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock); - lp != NULL; - lp = SH_TAILQ_NEXT(lp, links, __db_lock)) - fprintf(fp, "0x%lx: %lu\t%lu\t%s\t0x%lx\n", (u_long)lp, - (u_long)lp->holder, (u_long)lp->mode, - __lock_dump_status(lp->status), (u_long)lp->obj); - - fprintf(fp, "%s\nObject free list\n", DB_LINE); - for (op = SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj); - op != NULL; - op = SH_TAILQ_NEXT(op, links, __db_lockobj)) - fprintf(fp, "0x%lx\n", (u_long)op); - } - - if (LF_ISSET(LOCK_DUMP_MEM)) - __db_shalloc_dump(lt->mem, fp); -} - -static void -__lock_dump_locker(lt, op, fp) - DB_LOCKTAB *lt; - DB_LOCKOBJ *op; - FILE *fp; -{ - struct __db_lock *lp; - u_int32_t locker; - void *ptr; - - ptr = SH_DBT_PTR(&op->lockobj); - memcpy(&locker, ptr, sizeof(u_int32_t)); - fprintf(fp, "L %lx", (u_long)locker); - - lp = SH_LIST_FIRST(&op->heldby, __db_lock); - if (lp == NULL) { - fprintf(fp, "\n"); - return; - } - for (; lp != NULL; lp = SH_LIST_NEXT(lp, locker_links, __db_lock)) - __lock_printlock(lt, lp, 0); -} - -static void -__lock_dump_object(lt, op, fp) - DB_LOCKTAB *lt; - DB_LOCKOBJ *op; - FILE *fp; -{ - struct __db_lock *lp; - u_int32_t j; - u_int8_t *ptr; - u_int ch; - - ptr = SH_DBT_PTR(&op->lockobj); - for (j = 0; j < op->lockobj.size; ptr++, j++) { - ch = *ptr; - fprintf(fp, isprint(ch) ? 
"%c" : "\\%o", ch); - } - fprintf(fp, "\n"); - - fprintf(fp, "H:"); - for (lp = - SH_TAILQ_FIRST(&op->holders, __db_lock); - lp != NULL; - lp = SH_TAILQ_NEXT(lp, links, __db_lock)) - __lock_printlock(lt, lp, 0); - lp = SH_TAILQ_FIRST(&op->waiters, __db_lock); - if (lp != NULL) { - fprintf(fp, "\nW:"); - for (; lp != NULL; lp = SH_TAILQ_NEXT(lp, links, __db_lock)) - __lock_printlock(lt, lp, 0); - } -} - -static const char * -__lock_dump_status(status) - db_status_t status; -{ - switch (status) { - case DB_LSTAT_ABORTED: - return ("aborted"); - case DB_LSTAT_ERR: - return ("err"); - case DB_LSTAT_FREE: - return ("free"); - case DB_LSTAT_HELD: - return ("held"); - case DB_LSTAT_NOGRANT: - return ("nogrant"); - case DB_LSTAT_PENDING: - return ("pending"); - case DB_LSTAT_WAITING: - return ("waiting"); - } - return ("unknown status"); -} diff --git a/db2/lock/lock_util.c b/db2/lock/lock_util.c deleted file mode 100644 index 29da75b8a8..0000000000 --- a/db2/lock/lock_util.c +++ /dev/null @@ -1,152 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996, 1997, 1998 - * Sleepycat Software. All rights reserved. - */ - -#include "config.h" - -#ifndef lint -static const char sccsid[] = "@(#)lock_util.c 10.10 (Sleepycat) 9/20/98"; -#endif /* not lint */ - -#ifndef NO_SYSTEM_INCLUDES -#include <sys/types.h> - -#include <string.h> -#endif - -#include "db_int.h" -#include "shqueue.h" -#include "db_page.h" -#include "db_shash.h" -#include "hash.h" -#include "lock.h" - -/* - * __lock_cmp -- - * This function is used to compare a DBT that is about to be entered - * into a hash table with an object already in the hash table. Note - * that it just returns true on equal and 0 on not-equal. Therefore - * this function cannot be used as a sort function; its purpose is to - * be used as a hash comparison function. - * - * PUBLIC: int __lock_cmp __P((const DBT *, DB_LOCKOBJ *)); - */ -int -__lock_cmp(dbt, lock_obj) - const DBT *dbt; - DB_LOCKOBJ *lock_obj; -{ - void *obj_data; - - if (lock_obj->type != DB_LOCK_OBJTYPE) - return (0); - - obj_data = SH_DBT_PTR(&lock_obj->lockobj); - return (dbt->size == lock_obj->lockobj.size && - memcmp(dbt->data, obj_data, dbt->size) == 0); -} - -/* - * PUBLIC: int __lock_locker_cmp __P((u_int32_t, DB_LOCKOBJ *)); - */ -int -__lock_locker_cmp(locker, lock_obj) - u_int32_t locker; - DB_LOCKOBJ *lock_obj; -{ - void *obj_data; - - if (lock_obj->type != DB_LOCK_LOCKER) - return (0); - - obj_data = SH_DBT_PTR(&lock_obj->lockobj); - return (memcmp(&locker, obj_data, sizeof(u_int32_t)) == 0); -} - -/* - * The next two functions are the hash functions used to store objects in the - * lock hash table. They are hashing the same items, but one (__lock_ohash) - * takes a DBT (used for hashing a parameter passed from the user) and the - * other (__lock_lhash) takes a DB_LOCKOBJ (used for hashing something that is - * already in the lock manager). In both cases, we have a special check to - * fast path the case where we think we are doing a hash on a DB page/fileid - * pair. If the size is right, then we do the fast hash. - * - * We know that DB uses DB_LOCK_ILOCK types for its lock objects. The first - * four bytes are the 4-byte page number and the next DB_FILE_ID_LEN bytes - * are a unique file id, where the first 4 bytes on UNIX systems are the file - * inode number, and the first 4 bytes on Windows systems are the FileIndexLow - * bytes. So, we use the XOR of the page number and the first four bytes of - * the file id to produce a 32-bit hash value. 
- * - * We have no particular reason to believe that this algorithm will produce - * a good hash, but we want a fast hash more than we want a good one, when - * we're coming through this code path. - */ -#define FAST_HASH(P) { \ - u_int32_t __h; \ - u_int8_t *__cp, *__hp; \ - __hp = (u_int8_t *)&__h; \ - __cp = (u_int8_t *)(P); \ - __hp[0] = __cp[0] ^ __cp[4]; \ - __hp[1] = __cp[1] ^ __cp[5]; \ - __hp[2] = __cp[2] ^ __cp[6]; \ - __hp[3] = __cp[3] ^ __cp[7]; \ - return (__h); \ -} - -/* - * __lock_ohash -- - * - * PUBLIC: u_int32_t __lock_ohash __P((const DBT *)); - */ -u_int32_t -__lock_ohash(dbt) - const DBT *dbt; -{ - if (dbt->size == sizeof(DB_LOCK_ILOCK)) - FAST_HASH(dbt->data); - - return (__ham_func5(dbt->data, dbt->size)); -} - -/* - * __lock_lhash -- - * - * PUBLIC: u_int32_t __lock_lhash __P((DB_LOCKOBJ *)); - */ -u_int32_t -__lock_lhash(lock_obj) - DB_LOCKOBJ *lock_obj; -{ - u_int32_t tmp; - void *obj_data; - - obj_data = SH_DBT_PTR(&lock_obj->lockobj); - if (lock_obj->type == DB_LOCK_LOCKER) { - memcpy(&tmp, obj_data, sizeof(u_int32_t)); - return (tmp); - } - - if (lock_obj->lockobj.size == sizeof(DB_LOCK_ILOCK)) - FAST_HASH(obj_data); - - return (__ham_func5(obj_data, lock_obj->lockobj.size)); -} - -/* - * __lock_locker_hash -- - * Hash function for entering lockers into the hash table. Since these - * are simply 32-bit unsigned integers, just return the locker value. - * - * PUBLIC: u_int32_t __lock_locker_hash __P((u_int32_t)); - */ -u_int32_t -__lock_locker_hash(locker) - u_int32_t locker; -{ - return (locker); -} |
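[Annotation] A standalone rendering of the fast path above: for objects the size of a DB_LOCK_ILOCK, the hash is simply the 4-byte page number XORed, byte by byte, with the first four bytes of the file id that follow it. A sketch (the function name is illustrative):

#include <stdint.h>
#include <string.h>

/* XOR bytes 0..3 (page number) with bytes 4..7 (start of the file id). */
static uint32_t
fast_ilock_hash(const void *obj)
{
	const uint8_t *cp = obj;
	uint8_t hp[4];
	uint32_t h;
	int i;

	for (i = 0; i < 4; i++)
		hp[i] = cp[i] ^ cp[i + 4];
	memcpy(&h, hp, sizeof(h));
	return (h);
}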