Diffstat (limited to 'nscd/initgrcache.c')
-rw-r--r--  nscd/initgrcache.c  391
1 file changed, 391 insertions, 0 deletions
diff --git a/nscd/initgrcache.c b/nscd/initgrcache.c
new file mode 100644
index 0000000000..124b4bae62
--- /dev/null
+++ b/nscd/initgrcache.c
@@ -0,0 +1,391 @@
+/* Cache handling for initgroups lookup.
+ Copyright (C) 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2004.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <assert.h>
+#include <errno.h>
+#include <grp.h>
+#include <libintl.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <dbg_log.h>
+#include <nscd.h>
+
+#include "../nss/nsswitch.h"
+
+
+/* Type of the lookup function. */
+typedef enum nss_status (*initgroups_dyn_function) (const char *, gid_t,
+ long int *, long int *,
+ gid_t **, long int, int *);
+
+
+static const initgr_response_header notfound =
+{
+ .version = NSCD_VERSION,
+ .found = 0,
+ .ngrps = 0
+};
+
+
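+/* compat-initgroups.c provides compat_call, which emulates the
+ initgroups_dyn interface for NSS modules that only offer the
+ traditional setgrent/getgrent_r/endgrent functions. */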
+#include "../grp/compat-initgroups.c"
+
+
+static void
+addinitgroupsX (struct database_dyn *db, int fd, request_header *req,
+ void *key, uid_t uid, struct hashentry *he,
+ struct datahead *dh)
+{
+ /* Search for the entry matching the key. Please note that we don't
+ look again in the table whether the dataset is now available. We
+ simply insert it. It does not matter if it is in there twice. The
+ pruning function will only look at the timestamp. */
+
+
+ /* We allocate all data in one memory block: the record header,
+ the response header and the GID and key data. */
+ struct dataset
+ {
+ struct datahead head;
+ initgr_response_header resp;
+ char strdata[0];
+ } *dataset = NULL;
+
+ if (__builtin_expect (debug_level > 0, 0))
+ {
+ if (he == NULL)
+ dbg_log (_("Haven't found \"%s\" in group cache!"), (char *) key);
+ else
+ dbg_log (_("Reloading \"%s\" in group cache!"), (char *) key);
+ }
+
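+ /* Determine which NSS modules serve the "group" database. If
+ nsswitch.conf has no "group" entry, the default chain
+ "compat [NOTFOUND=return] files" is used. */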
+ static service_user *group_database;
+ service_user *nip = NULL;
+ int no_more;
+
+ if (group_database != NULL)
+ {
+ nip = group_database;
+ no_more = 0;
+ }
+ else
+ no_more = __nss_database_lookup ("group", NULL,
+ "compat [NOTFOUND=return] files", &nip);
+
+ /* We always use sysconf even if NGROUPS_MAX is defined. That way, the
+ limit can be raised in the kernel configuration without having to
+ recompile libc. */
+ long int limit = __sysconf (_SC_NGROUPS_MAX);
+
+ long int size;
+ if (limit > 0)
+ /* We limit the size of the initially allocated array. */
+ size = MIN (limit, 64);
+ else
+ /* No fixed limit on groups. Pick a starting buffer size. */
+ size = 16;
+
+ long int start = 0;
+ bool all_tryagain = true;
+
+ /* This is temporary memory, we need not (and must not) call
+ mempool_alloc. */
+ // XXX This really should use alloca. Need to change the backends.
+ gid_t *groups = (gid_t *) malloc (size * sizeof (gid_t));
+ if (__builtin_expect (groups == NULL, 0))
+ /* No more memory. */
+ goto out;
+
+ /* Nothing added yet. */
+ while (! no_more)
+ {
+ enum nss_status status;
+ initgroups_dyn_function fct;
+ fct = __nss_lookup_function (nip, "initgroups_dyn");
+
+ if (fct == NULL)
+ {
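+ /* The module does not provide initgroups_dyn; fall back to the
+ group-enumeration emulation from compat-initgroups.c. */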
+ status = compat_call (nip, key, -1, &start, &size, &groups,
+ limit, &errno);
+
+ if (nss_next_action (nip, NSS_STATUS_UNAVAIL) != NSS_ACTION_CONTINUE)
+ break;
+ }
+ else
+ status = DL_CALL_FCT (fct, (key, -1, &start, &size, &groups,
+ limit, &errno));
+
+ if (status != NSS_STATUS_TRYAGAIN)
+ all_tryagain = false;
+
+ /* This is really only for debugging. */
+ if (NSS_STATUS_TRYAGAIN > status || status > NSS_STATUS_RETURN)
+ __libc_fatal ("illegal status in internal_getgrouplist");
+
+ if (status != NSS_STATUS_SUCCESS
+ && nss_next_action (nip, status) == NSS_ACTION_RETURN)
+ break;
+
+ if (nip->next == NULL)
+ no_more = -1;
+ else
+ nip = nip->next;
+ }
+
+ ssize_t total;
+ ssize_t written;
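+ /* The OUT label is also reached when allocating the GROUPS buffer
+ failed; START is still zero in that case, so a negative result is
+ produced below. */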
+ out:
+ if (start == 0)
+ {
+ /* Nothing found. Create a negative result record. */
+ written = total = sizeof (notfound);
+
+ if (he != NULL && all_tryagain)
+ {
+ /* If we have an old record available but cannot find one now
+ because the service is not available we keep the old record
+ and make sure it does not get removed. */
+ if (reload_count != UINT_MAX && dh->nreloads == reload_count)
+ /* Do not reset the value if we never stop reloading the record. */
+ dh->nreloads = reload_count - 1;
+ }
+ else
+ {
+ /* We have no data. This means we send the standard reply for this
+ case. */
+ if (fd != -1)
+ written = TEMP_FAILURE_RETRY (write (fd, &notfound, total));
+
+ dataset = mempool_alloc (db, sizeof (struct dataset) + req->key_len);
+ /* If we cannot permanently store the result, so be it. */
+ if (dataset != NULL)
+ {
+ dataset->head.allocsize = sizeof (struct dataset) + req->key_len;
+ dataset->head.recsize = total;
+ dataset->head.notfound = true;
+ dataset->head.nreloads = 0;
+ dataset->head.usable = true;
+
+ /* Compute the timeout time. */
+ dataset->head.timeout = time (NULL) + db->negtimeout;
+
+ /* This is the reply. */
+ memcpy (&dataset->resp, &notfound, total);
+
+ /* Copy the key data. */
+ char *key_copy = memcpy (dataset->strdata, key, req->key_len);
+
+ /* If necessary, we also propagate the data to disk. */
+ if (db->persistent)
+ {
+ // XXX async OK?
+ uintptr_t pval = (uintptr_t) dataset & ~pagesize_m1;
+ msync ((void *) pval,
+ ((uintptr_t) dataset & pagesize_m1)
+ + sizeof (struct dataset) + req->key_len, MS_ASYNC);
+ }
+
+ /* Now get the lock to safely insert the records. */
+ pthread_rwlock_rdlock (&db->lock);
+
+ if (cache_add (req->type, key_copy, req->key_len,
+ &dataset->head, true, db, uid) < 0)
+ /* Ensure the data can be recovered. */
+ dataset->head.usable = false;
+
+ pthread_rwlock_unlock (&db->lock);
+
+ /* Mark the old entry as obsolete. */
+ if (dh != NULL)
+ dh->usable = false;
+ }
+ else
+ ++db->head->addfailed;
+ }
+ }
+ else
+ {
+
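+ /* TOTAL covers the record header, the response header and one
+ 32-bit GID per group found; REQ->key_len bytes for the user name
+ are added when allocating the record. */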
+ written = total = sizeof (struct dataset) + start * sizeof (int32_t);
+
+ /* If we refill the cache, first assume the record did not
+ change. Put the new data on the stack since it is likely to be
+ discarded anyway. If it turns out to be necessary to have a
+ new record we can still allocate real memory. */
+ bool alloca_used = false;
+ dataset = NULL;
+
+ if (he == NULL)
+ {
+ dataset = (struct dataset *) mempool_alloc (db,
+ total + req->key_len);
+ if (dataset == NULL)
+ ++db->head->addfailed;
+ }
+
+ if (dataset == NULL)
+ {
+ /* We cannot permanently add the result at the moment. But
+ we can provide the result as is. Store the data in some
+ temporary memory. */
+ dataset = (struct dataset *) alloca (total + req->key_len);
+
+ /* We cannot add this record to the permanent database. */
+ alloca_used = true;
+ }
+
+ dataset->head.allocsize = total + req->key_len;
+ dataset->head.recsize = total - offsetof (struct dataset, resp);
+ dataset->head.notfound = false;
+ dataset->head.nreloads = he == NULL ? 0 : (dh->nreloads + 1);
+ dataset->head.usable = true;
+
+ /* Compute the timeout time. */
+ dataset->head.timeout = time (NULL) + db->postimeout;
+
+ dataset->resp.version = NSCD_VERSION;
+ dataset->resp.found = 1;
+ dataset->resp.ngrps = start;
+
+ char *cp = dataset->strdata;
+
+ /* Copy the GID values. If the sizes of the types match this is
+ very simple. */
+ if (sizeof (gid_t) == sizeof (int32_t))
+ cp = mempcpy (cp, groups, start * sizeof (gid_t));
+ else
+ {
+ gid_t *gcp = (gid_t *) cp;
+
+ for (int i = 0; i < start; ++i)
+ *gcp++ = groups[i];
+
+ cp = (char *) gcp;
+ }
+
+ /* Finally the user name. */
+ memcpy (cp, key, req->key_len);
+
+ /* Now we can determine whether a refill requires creating a new
+ record or not. */
+ if (he != NULL)
+ {
+ assert (fd == -1);
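+ /* This is a refill initiated by nscd itself; no client connection
+ is waiting for a reply. */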
+
+ if (total + req->key_len == dh->allocsize
+ && total - offsetof (struct dataset, resp) == dh->recsize
+ && memcmp (&dataset->resp, dh->data,
+ dh->allocsize - offsetof (struct dataset, resp)) == 0)
+ {
+ /* The data has not changed. We will just bump the
+ timeout value. Note that the new record has been
+ allocated on the stack and need not be freed. */
+ dh->timeout = dataset->head.timeout;
+ ++dh->nreloads;
+ }
+ else
+ {
+ /* We have to create a new record. Just allocate
+ appropriate memory and copy it. */
+ struct dataset *newp
+ = (struct dataset *) mempool_alloc (db, total + req->key_len);
+ if (newp != NULL)
+ {
+ /* Adjust pointer into the memory block. */
+ cp = (char *) newp + (cp - (char *) dataset);
+
+ dataset = memcpy (newp, dataset, total + req->key_len);
+ alloca_used = false;
+ }
+
+ /* Mark the old record as obsolete. */
+ dh->usable = false;
+ }
+ }
+ else
+ {
+ /* We write the dataset before inserting it into the database
+ since while inserting this thread might block and would
+ unnecessarily keep the receiver waiting. */
+ assert (fd != -1);
+
+ written = TEMP_FAILURE_RETRY (write (fd, &dataset->resp, total));
+ }
+
+
+ /* Add the record to the database. But only if it has not been
+ stored on the stack. */
+ if (! alloca_used)
+ {
+ /* If necessary, we also propagate the data to disk. */
+ if (db->persistent)
+ {
+ // XXX async OK?
+ uintptr_t pval = (uintptr_t) dataset & ~pagesize_m1;
+ msync ((void *) pval,
+ ((uintptr_t) dataset & pagesize_m1) + total +
+ req->key_len, MS_ASYNC);
+ }
+
+ /* Now get the lock to safely insert the records. */
+ pthread_rwlock_rdlock (&db->lock);
+
+ if (cache_add (INITGROUPS, cp, req->key_len, &dataset->head, true,
+ db, uid) < 0)
+ /* Could not allocate memory. Make sure the data gets
+ discarded. */
+ dataset->head.usable = false;
+
+ pthread_rwlock_unlock (&db->lock);
+ }
+ }
+
+ free (groups);
+
+ if (__builtin_expect (written != total, 0) && debug_level > 0)
+ {
+ char buf[256];
+ dbg_log (_("short write in %s: %s"), __FUNCTION__,
+ strerror_r (errno, buf, sizeof (buf)));
+ }
+}
+
+
+void
+addinitgroups (struct database_dyn *db, int fd, request_header *req, void *key,
+ uid_t uid)
+{
+ addinitgroupsX (db, fd, req, key, uid, NULL, NULL);
+}
+
+
+void
+readdinitgroups (struct database_dyn *db, struct hashentry *he,
+ struct datahead *dh)
+{
+ request_header req =
+ {
+ .type = INITGROUPS,
+ .key_len = he->len
+ };
+
+ addinitgroupsX (db, -1, &req, db->data + he->key, he->owner, he, dh);
+}