/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 1996, 1997, 1998
* Sleepycat Software. All rights reserved.
*/
#include "config.h"
#ifndef lint
static const char sccsid[] = "@(#)lock.c 10.52 (Sleepycat) 5/10/98";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <errno.h>
#include <string.h>
#endif
#include "db_int.h"
#include "shqueue.h"
#include "db_page.h"
#include "db_shash.h"
#include "lock.h"
#include "common_ext.h"
#include "db_am.h"
static void __lock_checklocker __P((DB_LOCKTAB *, struct __db_lock *, int));
static void __lock_freeobj __P((DB_LOCKTAB *, DB_LOCKOBJ *));
static int __lock_get_internal __P((DB_LOCKTAB *, u_int32_t, u_int32_t,
const DBT *, db_lockmode_t, struct __db_lock **));
static int __lock_put_internal __P((DB_LOCKTAB *, struct __db_lock *, int));
static void __lock_remove_waiter
__P((DB_LOCKTAB *, DB_LOCKOBJ *, struct __db_lock *, db_status_t));
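/*
 * lock_id --
 *	Allocate the next locker id, wrapping around at DB_LOCK_MAXID.
 */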
int
lock_id(lt, idp)
DB_LOCKTAB *lt;
u_int32_t *idp;
{
u_int32_t id;
LOCK_LOCKREGION(lt);
if (lt->region->id >= DB_LOCK_MAXID)
lt->region->id = 0;
id = ++lt->region->id;
UNLOCK_LOCKREGION(lt);
*idp = id;
return (0);
}
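/*
 * lock_vec --
 *	Process a list of lock requests (get, put, put-all, put-obj, dump)
 *	under a single acquisition of the region lock. On error, *elistp
 *	is set to point at the request that failed.
 */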
int
lock_vec(lt, locker, flags, list, nlist, elistp)
DB_LOCKTAB *lt;
u_int32_t locker, flags;
int nlist;
DB_LOCKREQ *list, **elistp;
{
struct __db_lock *lp;
DB_LOCKOBJ *sh_obj, *sh_locker;
int i, ret, run_dd;
/* Validate arguments. */
if ((ret =
__db_fchk(lt->dbenv, "lock_vec", flags, DB_LOCK_NOWAIT)) != 0)
return (ret);
LOCK_LOCKREGION(lt);
if ((ret = __lock_validate_region(lt)) != 0) {
UNLOCK_LOCKREGION(lt);
return (ret);
}
ret = 0;
for (i = 0; i < nlist && ret == 0; i++) {
switch (list[i].op) {
case DB_LOCK_GET:
ret = __lock_get_internal(lt, locker, flags,
list[i].obj, list[i].mode, &lp);
if (ret == 0) {
list[i].lock = LOCK_TO_OFFSET(lt, lp);
lt->region->nrequests++;
}
break;
case DB_LOCK_PUT:
lp = OFFSET_TO_LOCK(lt, list[i].lock);
if (lp->holder != locker) {
ret = DB_LOCK_NOTHELD;
break;
}
list[i].mode = lp->mode;
/* XXX Need to copy the object. ??? */
ret = __lock_put_internal(lt, lp, 0);
break;
case DB_LOCK_PUT_ALL:
/* Find the locker. */
if ((ret = __lock_getobj(lt, locker,
NULL, DB_LOCK_LOCKER, &sh_locker)) != 0)
break;
for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
lp != NULL;
lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock)) {
if ((ret = __lock_put_internal(lt, lp, 1)) != 0)
break;
}
__lock_freeobj(lt, sh_locker);
lt->region->nlockers--;
break;
case DB_LOCK_PUT_OBJ:
/* Look up the object in the hash table. */
HASHLOOKUP(lt->hashtab, __db_lockobj, links,
list[i].obj, sh_obj, lt->region->table_size,
__lock_ohash, __lock_cmp);
if (sh_obj == NULL) {
ret = EINVAL;
break;
}
/*
* Release waiters first, because they won't cause
* anyone else to be awakened. If we release the
* holders first, all the waiters get awakened
* needlessly.
*/
for (lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock);
lp != NULL;
lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock)) {
lt->region->nreleases += lp->refcount;
__lock_remove_waiter(lt, sh_obj, lp,
DB_LSTAT_NOGRANT);
__lock_checklocker(lt, lp, 1);
}
for (lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock);
lp != NULL;
lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock)) {
lt->region->nreleases += lp->refcount;
SH_LIST_REMOVE(lp, locker_links, __db_lock);
SH_TAILQ_REMOVE(&sh_obj->holders, lp, links,
__db_lock);
lp->status = DB_LSTAT_FREE;
SH_TAILQ_INSERT_HEAD(&lt->region->free_locks,
lp, links, __db_lock);
}
/* Now free the object. */
__lock_freeobj(lt, sh_obj);
break;
#ifdef DEBUG
case DB_LOCK_DUMP:
/* Find the locker. */
if ((ret = __lock_getobj(lt, locker,
NULL, DB_LOCK_LOCKER, &sh_locker)) != 0)
break;
for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
lp != NULL;
lp = SH_LIST_NEXT(lp, locker_links, __db_lock)) {
__lock_printlock(lt, lp, 1);
ret = EINVAL;
}
if (ret == 0) {
__lock_freeobj(lt, sh_locker);
lt->region->nlockers--;
}
break;
#endif
default:
ret = EINVAL;
break;
}
}
if (lt->region->need_dd && lt->region->detect != DB_LOCK_NORUN) {
run_dd = 1;
lt->region->need_dd = 0;
} else
run_dd = 0;
UNLOCK_LOCKREGION(lt);
if (ret == 0 && run_dd)
lock_detect(lt, 0, lt->region->detect);
if (elistp && ret != 0)
*elistp = &list[i - 1];
return (ret);
}
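/*
 * lock_get --
 *	Acquire a single lock; a thin wrapper around __lock_get_internal
 *	that holds the region lock for the duration of the request.
 */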
int
lock_get(lt, locker, flags, obj, lock_mode, lock)
DB_LOCKTAB *lt;
u_int32_t locker, flags;
const DBT *obj;
db_lockmode_t lock_mode;
DB_LOCK *lock;
{
struct __db_lock *lockp;
int ret;
/* Validate arguments. */
if ((ret =
__db_fchk(lt->dbenv, "lock_get", flags, DB_LOCK_NOWAIT)) != 0)
return (ret);
LOCK_LOCKREGION(lt);
ret = __lock_validate_region(lt);
if (ret == 0 && (ret = __lock_get_internal(lt,
locker, flags, obj, lock_mode, &lockp)) == 0) {
*lock = LOCK_TO_OFFSET(lt, lockp);
lt->region->nrequests++;
}
UNLOCK_LOCKREGION(lt);
return (ret);
}
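/*
 * lock_put --
 *	Release a single lock and, if the release left the lock table in
 *	need of another deadlock-detection pass, run the detector.
 */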
int
lock_put(lt, lock)
DB_LOCKTAB *lt;
DB_LOCK lock;
{
struct __db_lock *lockp;
int ret, run_dd;
LOCK_LOCKREGION(lt);
if ((ret = __lock_validate_region(lt)) != 0) {
/* Don't return while still holding the region lock. */
UNLOCK_LOCKREGION(lt);
return (ret);
}
lockp = OFFSET_TO_LOCK(lt, lock);
ret = __lock_put_internal(lt, lockp, 0);
__lock_checklocker(lt, lockp, 0);
if (lt->region->need_dd && lt->region->detect != DB_LOCK_NORUN) {
run_dd = 1;
lt->region->need_dd = 0;
} else
run_dd = 0;
UNLOCK_LOCKREGION(lt);
if (ret == 0 && run_dd)
lock_detect(lt, 0, lt->region->detect);
return (ret);
}
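/*
 * __lock_put_internal --
 *	Release a lock: remove it from its locker's list and from the
 *	object's holder/waiter queues, promote any waiters that no longer
 *	conflict, and reclaim the object once nothing references it.
 */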
static int
__lock_put_internal(lt, lockp, do_all)
DB_LOCKTAB *lt;
struct __db_lock *lockp;
int do_all;
{
struct __db_lock *lp_w, *lp_h, *next_waiter;
DB_LOCKOBJ *sh_obj;
int state_changed;
if (lockp->refcount == 0 || (lockp->status != DB_LSTAT_HELD &&
lockp->status != DB_LSTAT_WAITING) || lockp->obj == 0) {
__db_err(lt->dbenv, "lock_put: invalid lock %lu",
(u_long)((u_int8_t *)lockp - (u_int8_t *)lt->region));
return (EINVAL);
}
if (do_all)
lt->region->nreleases += lockp->refcount;
else
lt->region->nreleases++;
if (do_all == 0 && lockp->refcount > 1) {
lockp->refcount--;
return (0);
}
/* Get the object associated with this lock. */
sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
/* Remove lock from locker list. */
SH_LIST_REMOVE(lockp, locker_links, __db_lock);
/* Remove this lock from its holders/waitlist. */
if (lockp->status != DB_LSTAT_HELD)
__lock_remove_waiter(lt, sh_obj, lockp, DB_LSTAT_FREE);
else
SH_TAILQ_REMOVE(&sh_obj->holders, lockp, links, __db_lock);
/*
* We need to do lock promotion. We also need to determine if
* we're going to need to run the deadlock detector again. If
* we release locks, and there are waiters, but no one gets promoted,
* then we haven't fundamentally changed the lockmgr state, so
* we may still have a deadlock and we have to run again. However,
* if there were no waiters, or we actually promoted someone, then
* we are OK and we don't have to run it immediately.
*/
for (lp_w = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock),
state_changed = lp_w == NULL;
lp_w != NULL;
lp_w = next_waiter) {
next_waiter = SH_TAILQ_NEXT(lp_w, links, __db_lock);
for (lp_h = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock);
lp_h != NULL;
lp_h = SH_TAILQ_NEXT(lp_h, links, __db_lock)) {
if (CONFLICTS(lt, lp_h->mode, lp_w->mode) &&
lp_h->holder != lp_w->holder)
break;
}
if (lp_h != NULL) /* Found a conflict. */
break;
/* No conflict, promote the waiting lock. */
SH_TAILQ_REMOVE(&sh_obj->waiters, lp_w, links, __db_lock);
lp_w->status = DB_LSTAT_PENDING;
SH_TAILQ_INSERT_TAIL(&sh_obj->holders, lp_w, links);
/* Wake up waiter. */
(void)__db_mutex_unlock(&lp_w->mutex, lt->reginfo.fd);
state_changed = 1;
}
/* Check if object should be reclaimed. */
if (SH_TAILQ_FIRST(&sh_obj->holders, __db_lock) == NULL) {
HASHREMOVE_EL(lt->hashtab, __db_lockobj,
links, sh_obj, lt->region->table_size, __lock_lhash);
if (sh_obj->lockobj.size > sizeof(sh_obj->objdata))
__db_shalloc_free(lt->mem,
SH_DBT_PTR(&sh_obj->lockobj));
SH_TAILQ_INSERT_HEAD(&lt->region->free_objs, sh_obj, links,
__db_lockobj);
state_changed = 1;
}
/* Free lock. */
lockp->status = DB_LSTAT_FREE;
SH_TAILQ_INSERT_HEAD(&lt->region->free_locks, lockp, links, __db_lock);
/*
* If we did not promote anyone, we need to run the deadlock
* detector again.
*/
if (state_changed == 0)
lt->region->need_dd = 1;
return (0);
}
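/*
 * __lock_get_internal --
 *	Grant a lock immediately if it conflicts with no current holder
 *	or waiter (FIFO ordering); otherwise queue the request and block
 *	on the lock's mutex until it is granted, aborted, or rejected.
 */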
static int
__lock_get_internal(lt, locker, flags, obj, lock_mode, lockp)
DB_LOCKTAB *lt;
u_int32_t locker, flags;
const DBT *obj;
db_lockmode_t lock_mode;
struct __db_lock **lockp;
{
struct __db_lock *newl, *lp;
DB_LOCKOBJ *sh_obj, *sh_locker;
DB_LOCKREGION *lrp;
size_t newl_off;
int ihold, ret;
ret = 0;
/*
* Check that lock mode is valid.
*/
lrp = lt->region;
if ((u_int32_t)lock_mode >= lrp->nmodes) {
__db_err(lt->dbenv,
"lock_get: invalid lock mode %lu\n", (u_long)lock_mode);
return (EINVAL);
}
/* Allocate a new lock. Optimize for the common case of a grant. */
if ((newl = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock)) == NULL) {
if ((ret = __lock_grow_region(lt, DB_LOCK_LOCK, 0)) != 0)
return (ret);
lrp = lt->region;
newl = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock);
}
newl_off = LOCK_TO_OFFSET(lt, newl);
/* Optimize for common case of granting a lock. */
SH_TAILQ_REMOVE(&lrp->free_locks, newl, links, __db_lock);
newl->mode = lock_mode;
newl->status = DB_LSTAT_HELD;
newl->holder = locker;
newl->refcount = 1;
if ((ret = __lock_getobj(lt, 0, obj, DB_LOCK_OBJTYPE, &sh_obj)) != 0)
return (ret);
lrp = lt->region; /* getobj might have grown */
newl = OFFSET_TO_LOCK(lt, newl_off);
/* Now make the new lock point to its object. */
newl->obj = SH_PTR_TO_OFF(newl, sh_obj);
/*
* Now we have a lock and an object and we need to see if we should
* grant the lock. We use a FIFO ordering so we can only grant a
* new lock if it does not conflict with anyone on the holders list
* OR anyone on the waiters list. The reason that we don't grant if
* there's a conflict is that this can lead to starvation (a writer
* waiting on a popularly read item will never be granted). The
* downside of this is that a waiting reader can prevent an upgrade
* from reader to writer, which is not uncommon.
*
* There is one exception to the no-conflict rule. If a lock is held
* by the requesting locker AND the new lock does not conflict with
* any other holders, then we grant the lock. The most common place
* this happens is when the holder has a WRITE lock and a READ lock
* request comes in for the same locker. If we do not grant the read
* lock, then we guarantee deadlock.
*
* In case of conflict, we put the new lock on the end of the waiters
* list.
*/
ihold = 0;
for (lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock);
lp != NULL;
lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
if (locker == lp->holder) {
if (lp->mode == lock_mode &&
lp->status == DB_LSTAT_HELD) {
/* Lock is held, just inc the ref count. */
lp->refcount++;
SH_TAILQ_INSERT_HEAD(&lrp->free_locks,
newl, links, __db_lock);
*lockp = lp;
return (0);
} else
ihold = 1;
} else if (CONFLICTS(lt, lp->mode, lock_mode))
break;
}
if (lp == NULL && !ihold)
for (lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock);
lp != NULL;
lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
if (CONFLICTS(lt, lp->mode, lock_mode) &&
locker != lp->holder)
break;
}
if (lp == NULL)
SH_TAILQ_INSERT_TAIL(&sh_obj->holders, newl, links);
else if (!(flags & DB_LOCK_NOWAIT))
SH_TAILQ_INSERT_TAIL(&sh_obj->waiters, newl, links);
else {
/* Free the lock and return an error. */
newl->status = DB_LSTAT_FREE;
SH_TAILQ_INSERT_HEAD(&lrp->free_locks, newl, links, __db_lock);
return (DB_LOCK_NOTGRANTED);
}
/*
* This mutex serves to block the requesting process, so initialize
* it in the locked state: the current process will block when it
* tries to acquire it, and the process granting the lock will
* unlock it to wake us up.
*/
(void)__db_mutex_init(&newl->mutex,
MUTEX_LOCK_OFFSET(lt->region, &newl->mutex));
(void)__db_mutex_lock(&newl->mutex, lt->reginfo.fd);
/*
* Now, insert the lock onto its locker's list.
*/
if ((ret =
__lock_getobj(lt, locker, NULL, DB_LOCK_LOCKER, &sh_locker)) != 0)
return (ret);
lrp = lt->region;
SH_LIST_INSERT_HEAD(&sh_locker->heldby, newl, locker_links, __db_lock);
if (lp != NULL) {
newl->status = DB_LSTAT_WAITING;
lrp->nconflicts++;
/*
* We are about to wait; must release the region mutex.
* Then, when we wakeup, we need to reacquire the region
* mutex before continuing.
*/
if (lrp->detect == DB_LOCK_NORUN)
lt->region->need_dd = 1;
UNLOCK_LOCKREGION(lt);
/*
* We are about to wait; before waiting, see if the deadlock
* detector should be run.
*/
if (lrp->detect != DB_LOCK_NORUN)
ret = lock_detect(lt, 0, lrp->detect);
(void)__db_mutex_lock(&newl->mutex, lt->reginfo.fd);
LOCK_LOCKREGION(lt);
if (newl->status != DB_LSTAT_PENDING) {
/* Return to free list. */
__lock_checklocker(lt, newl, 0);
SH_TAILQ_INSERT_HEAD(&lrp->free_locks, newl, links,
__db_lock);
switch (newl->status) {
case DB_LSTAT_ABORTED:
ret = DB_LOCK_DEADLOCK;
break;
case DB_LSTAT_NOGRANT:
ret = DB_LOCK_NOTGRANTED;
break;
default:
ret = EINVAL;
break;
}
newl->status = DB_LSTAT_FREE;
newl = NULL;
} else
newl->status = DB_LSTAT_HELD;
}
*lockp = newl;
return (ret);
}
/*
* __lock_is_locked --
*
* PUBLIC: int __lock_is_locked
* PUBLIC: __P((DB_LOCKTAB *, u_int32_t, DBT *, db_lockmode_t));
*/
int
__lock_is_locked(lt, locker, dbt, mode)
DB_LOCKTAB *lt;
u_int32_t locker;
DBT *dbt;
db_lockmode_t mode;
{
struct __db_lock *lp;
DB_LOCKOBJ *sh_obj;
DB_LOCKREGION *lrp;
lrp = lt->region;
/* Look up the object in the hash table. */
HASHLOOKUP(lt->hashtab, __db_lockobj, links,
dbt, sh_obj, lrp->table_size, __lock_ohash, __lock_cmp);
if (sh_obj == NULL)
return (0);
/* Walk the list of holders looking for a matching lock. */
for (lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock);
lp != NULL;
lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
if (lp->holder == locker && lp->mode == mode)
return (1);
}
return (0);
}
/*
* __lock_printlock --
*
* PUBLIC: void __lock_printlock __P((DB_LOCKTAB *, struct __db_lock *, int));
*/
void
__lock_printlock(lt, lp, ispgno)
DB_LOCKTAB *lt;
struct __db_lock *lp;
int ispgno;
{
DB_LOCKOBJ *lockobj;
db_pgno_t pgno;
size_t obj;
u_int8_t *ptr;
const char *mode, *status;
switch (lp->mode) {
case DB_LOCK_IREAD:
mode = "IREAD";
break;
case DB_LOCK_IWR:
mode = "IWR";
break;
case DB_LOCK_IWRITE:
mode = "IWRITE";
break;
case DB_LOCK_NG:
mode = "NG";
break;
case DB_LOCK_READ:
mode = "READ";
break;
case DB_LOCK_WRITE:
mode = "WRITE";
break;
default:
mode = "UNKNOWN";
break;
}
switch (lp->status) {
case DB_LSTAT_ABORTED:
status = "ABORT";
break;
case DB_LSTAT_ERR:
status = "ERROR";
break;
case DB_LSTAT_FREE:
status = "FREE";
break;
case DB_LSTAT_HELD:
status = "HELD";
break;
case DB_LSTAT_NOGRANT:
status = "NONE";
break;
case DB_LSTAT_WAITING:
status = "WAIT";
break;
case DB_LSTAT_PENDING:
status = "PENDING";
break;
default:
status = "UNKNOWN";
break;
}
printf("\t%lx\t%s\t%lu\t%s\t",
(u_long)lp->holder, mode, (u_long)lp->refcount, status);
lockobj = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
ptr = SH_DBT_PTR(&lockobj->lockobj);
if (ispgno) {
/* The lock object holds a page number. */
memcpy(&pgno, ptr, sizeof(db_pgno_t));
printf("page %lu\n", (u_long)pgno);
} else {
obj = (u_int8_t *)lp + lp->obj - (u_int8_t *)lt->region;
printf("0x%lx ", (u_long)obj);
__db_pr(ptr, lockobj->lockobj.size);
printf("\n");
}
}
/*
* __lock_getobj --
* Look up the lock object or locker in the hash table; create and
* insert it if it does not yet exist.
*
* PUBLIC: int __lock_getobj __P((DB_LOCKTAB *,
* PUBLIC: u_int32_t, const DBT *, u_int32_t type, DB_LOCKOBJ **));
*/
int
__lock_getobj(lt, locker, dbt, type, objp)
DB_LOCKTAB *lt;
u_int32_t locker, type;
const DBT *dbt;
DB_LOCKOBJ **objp;
{
DB_LOCKREGION *lrp;
DB_LOCKOBJ *sh_obj;
u_int32_t obj_size;
int ret;
void *p, *src;
lrp = lt->region;
/* Look up the object in the hash table. */
if (type == DB_LOCK_OBJTYPE) {
HASHLOOKUP(lt->hashtab, __db_lockobj, links, dbt, sh_obj,
lrp->table_size, __lock_ohash, __lock_cmp);
obj_size = dbt->size;
} else {
HASHLOOKUP(lt->hashtab, __db_lockobj, links, locker,
sh_obj, lrp->table_size, __lock_locker_hash,
__lock_locker_cmp);
obj_size = sizeof(locker);
}
/*
* If we found the object, then we can just return it. If
* we didn't find the object, then we need to create it.
*/
if (sh_obj == NULL) {
/* Create new object and then insert it into hash table. */
if ((sh_obj =
SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj)) == NULL) {
if ((ret = __lock_grow_region(lt, DB_LOCK_OBJ, 0)) != 0)
return (ret);
lrp = lt->region;
sh_obj = SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj);
}
/*
* If we can fit this object in the structure, do so instead
* of shalloc-ing space for it.
*/
if (obj_size <= sizeof(sh_obj->objdata))
p = sh_obj->objdata;
else
if ((ret =
__db_shalloc(lt->mem, obj_size, 0, &p)) != 0) {
if ((ret = __lock_grow_region(lt,
DB_LOCK_MEM, obj_size)) != 0)
return (ret);
lrp = lt->region;
/* Reacquire the head of the list. */
sh_obj = SH_TAILQ_FIRST(&lrp->free_objs,
__db_lockobj);
(void)__db_shalloc(lt->mem, obj_size, 0, &p);
}
src = type == DB_LOCK_OBJTYPE ? dbt->data : (void *)&locker;
memcpy(p, src, obj_size);
sh_obj->type = type;
SH_TAILQ_REMOVE(&lrp->free_objs, sh_obj, links, __db_lockobj);
SH_TAILQ_INIT(&sh_obj->waiters);
if (type == DB_LOCK_LOCKER)
SH_LIST_INIT(&sh_obj->heldby);
else
SH_TAILQ_INIT(&sh_obj->holders);
sh_obj->lockobj.size = obj_size;
sh_obj->lockobj.off = SH_PTR_TO_OFF(&sh_obj->lockobj, p);
HASHINSERT(lt->hashtab,
__db_lockobj, links, sh_obj, lrp->table_size, __lock_lhash);
if (type == DB_LOCK_LOCKER)
lrp->nlockers++;
}
*objp = sh_obj;
return (0);
}
/*
* Any lock on the waitlist has a process waiting for it. Therefore, we
* can't return the lock to the freelist immediately. Instead, we can
* remove the lock from the list of waiters, set the status field of the
* lock, and then let the process waking up return the lock to the
* free list.
*/
static void
__lock_remove_waiter(lt, sh_obj, lockp, status)
DB_LOCKTAB *lt;
DB_LOCKOBJ *sh_obj;
struct __db_lock *lockp;
db_status_t status;
{
SH_TAILQ_REMOVE(&sh_obj->waiters, lockp, links, __db_lock);
lockp->status = status;
/* Wake whoever is waiting on this lock. */
(void)__db_mutex_unlock(&lockp->mutex, lt->reginfo.fd);
}
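/*
 * __lock_checklocker --
 *	If the locker holds no more locks, free its locker object.
 */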
static void
__lock_checklocker(lt, lockp, do_remove)
DB_LOCKTAB *lt;
struct __db_lock *lockp;
int do_remove;
{
DB_LOCKOBJ *sh_locker;
if (do_remove)
SH_LIST_REMOVE(lockp, locker_links, __db_lock);
/* If the locker no longer holds any locks, free the locker object. */
if (__lock_getobj(lt, lockp->holder, NULL, DB_LOCK_LOCKER, &sh_locker)
== 0 && SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL) {
__lock_freeobj(lt, sh_locker);
lt->region->nlockers--;
}
}
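/*
 * __lock_freeobj --
 *	Remove an object from the hash table and return it to the free
 *	object list.
 */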
static void
__lock_freeobj(lt, obj)
DB_LOCKTAB *lt;
DB_LOCKOBJ *obj;
{
HASHREMOVE_EL(lt->hashtab,
__db_lockobj, links, obj, lt->region->table_size, __lock_lhash);
if (obj->lockobj.size > sizeof(obj->objdata))
__db_shalloc_free(lt->mem, SH_DBT_PTR(&obj->lockobj));
SH_TAILQ_INSERT_HEAD(&lt->region->free_objs, obj, links, __db_lockobj);
}