Diffstat (limited to 'sysdeps/nacl/dl-map-segments.h')
-rw-r--r-- | sysdeps/nacl/dl-map-segments.h | 304
1 file changed, 155 insertions, 149 deletions
diff --git a/sysdeps/nacl/dl-map-segments.h b/sysdeps/nacl/dl-map-segments.h
index d93badf1d3..f305da304a 100644
--- a/sysdeps/nacl/dl-map-segments.h
+++ b/sysdeps/nacl/dl-map-segments.h
@@ -34,12 +34,12 @@ read_in_data (int fd, void *data, size_t len, off_t pos)
     {
       ssize_t n = __read (fd, data, len);
       if (__glibc_unlikely (n < 0))
-        return true;
+	return true;
       if (__glibc_unlikely (n == 0))
-        {
-          errno = EFTYPE;
-          return true;
-        }
+	{
+	  errno = EFTYPE;
+	  return true;
+	}
       data += n;
       len -= n;
     }
@@ -48,59 +48,63 @@ read_in_data (int fd, void *data, size_t len, off_t pos)
 
 static const char *
 _dl_map_segments (struct link_map *l, int fd,
-                  const ElfW(Ehdr) *header, int type,
-                  const struct loadcmd loadcmds[], size_t nloadcmds,
-                  const size_t maplength, bool has_holes,
-                  struct link_map *loader)
+		  const ElfW(Ehdr) *header, int type,
+		  const struct loadcmd loadcmds[], size_t nloadcmds,
+		  const size_t maplength, bool has_holes,
+		  struct link_map *loader)
 {
   if (__builtin_expect (type, ET_DYN) == ET_DYN)
     {
       /* This is a position-independent shared object. Let the system
-         choose where to place it.
+	 choose where to place it.
 
-         As a refinement, sometimes we have an address that we would
-         prefer to map such objects at; but this is only a preference,
-         the OS can do whatever it likes. */
+	 As a refinement, sometimes we have an address that we would
+	 prefer to map such objects at; but this is only a preference,
+	 the OS can do whatever it likes. */
       ElfW(Addr) mappref
-        = (ELF_PREFERRED_ADDRESS (loader, maplength,
-                                  loadcmds[0].mapstart & GLRO(dl_use_load_bias))
-           - MAP_BASE_ADDR (l));
+	= (ELF_PREFERRED_ADDRESS (loader, maplength,
+				  loadcmds[0].mapstart & GLRO(dl_use_load_bias))
+	   - MAP_BASE_ADDR (l));
 
       uintptr_t mapstart;
       if (__glibc_likely (loadcmds[0].prot & PROT_EXEC))
-        {
-          uintptr_t code_size = loadcmds[0].allocend - loadcmds[0].mapstart;
-          uintptr_t data_offset;
-          size_t data_size;
+	{
+	  /* When there is a code segment, we must use the
+	     allocate_code_data interface to choose a location. */
 
-          if (__glibc_likely (nloadcmds > 1))
-            {
-              data_offset = loadcmds[1].mapstart - loadcmds[0].mapstart;
-              data_size = ALIGN_UP (maplength - data_offset,
-                                    GLRO(dl_pagesize));
-            }
-          else
-            {
-              data_offset = 0;
-              data_size = 0;
-            }
+	  uintptr_t code_size = loadcmds[0].allocend - loadcmds[0].mapstart;
+	  uintptr_t data_offset;
+	  size_t data_size;
 
-          int error = __nacl_irt_code_data_alloc.allocate_code_data
-            (mappref, code_size, data_offset, data_size, &mapstart);
-          if (__glibc_unlikely (error))
-            {
-              errno = error;
-              return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
-            }
-        }
+	  if (__glibc_likely (nloadcmds > 1))
+	    {
+	      data_offset = loadcmds[1].mapstart - loadcmds[0].mapstart;
+	      data_size = ALIGN_UP (maplength - data_offset,
+				    GLRO(dl_pagesize));
+	    }
+	  else
+	    {
+	      data_offset = 0;
+	      data_size = 0;
+	    }
+
+	  int error = __nacl_irt_code_data_alloc.allocate_code_data
+	    (mappref, code_size, data_offset, data_size, &mapstart);
+	  if (__glibc_unlikely (error))
+	    {
+	      errno = error;
+	      return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
+	    }
+	}
       else
-        {
-          void *mapped = __mmap ((void *) mappref, maplength,
-                                 PROT_NONE, MAP_ANON, -1, 0);
-          if (__glibc_unlikely (mapped == MAP_FAILED))
-            return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
-          mapstart = (uintptr_t) mapped;
-        }
+	{
+	  /* With no code pages involved, plain mmap works fine. */
+	  void *mapped = __mmap ((void *) mappref, maplength,
+				 PROT_NONE, MAP_ANON, -1, 0);
+	  if (__glibc_unlikely (mapped == MAP_FAILED))
+	    return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
+	  mapstart = (uintptr_t) mapped;
+	}
 
       l->l_addr = mapstart - loadcmds[0].mapstart;
     }
@@ -110,118 +114,120 @@ _dl_map_segments (struct link_map *l, int fd,
   l->l_map_end = l->l_map_start + maplength;
   l->l_contiguous = !has_holes;
 
+  /* Now actually map (or read) in each segment. */
   for (const struct loadcmd *c = loadcmds; c < &loadcmds[nloadcmds]; ++c)
     if (__glibc_likely (c->mapend > c->mapstart))
       {
-        /* Unlike POSIX mmap, NaCl's mmap does not reliably handle COW
-           faults in the remainder of the final partial page.  So to get
-           the expected behavior for the unaligned boundary between data
-           and bss, it's necessary to allocate the final partial page of
-           data as anonymous memory rather than mapping it from the file.  */
+	/* Unlike POSIX mmap, NaCl's mmap does not reliably handle COW
+	   faults in the remainder of the final partial page.  So to get
+	   the expected behavior for the unaligned boundary between data
+	   and bss, it's necessary to allocate the final partial page of
+	   data as anonymous memory rather than mapping it from the file.  */
 
-        size_t maplen = c->mapend - c->mapstart;
-        if (c->mapend > c->dataend && c->allocend > c->dataend)
-          maplen = (c->dataend & -GLRO(dl_pagesize)) - c->mapstart;
+	size_t maplen = c->mapend - c->mapstart;
+	if (c->mapend > c->dataend && c->allocend > c->dataend)
+	  maplen = (c->dataend & -GLRO(dl_pagesize)) - c->mapstart;
 
-        /* Map the segment contents from the file.  */
-        if (__glibc_unlikely (__mmap ((void *) (l->l_addr + c->mapstart),
-                                      maplen, c->prot,
-                                      MAP_FIXED|MAP_COPY|MAP_FILE,
-                                      fd, c->mapoff)
-                              == MAP_FAILED))
-          {
-            switch (errno)
-              {
-              case EINVAL:
-              case ENOTSUP:
-              case ENOSYS:
-                break;
-              default:
-                return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
-              }
+	/* Map the segment contents from the file.  */
+	if (__glibc_unlikely (__mmap ((void *) (l->l_addr + c->mapstart),
+				      maplen, c->prot,
+				      MAP_FIXED|MAP_COPY|MAP_FILE,
+				      fd, c->mapoff)
+			      == MAP_FAILED))
+	  {
+	    switch (errno)
+	      {
+	      case EINVAL:
+	      case ENOTSUP:
+	      case ENOSYS:
+		break;
+	      default:
+		return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
+	      }
 
-            /* No mmap support for this file.  */
-            if (c->prot & PROT_EXEC)
-              {
-                /* Read the data into a temporary buffer.  */
-                const size_t len = c->mapend - c->mapstart;
-                void *data = __mmap (NULL, len, PROT_READ | PROT_WRITE,
-                                     MAP_ANON|MAP_PRIVATE, -1, 0);
-                if (__glibc_unlikely (data == MAP_FAILED))
-                  return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
-                if (read_in_data (fd, data, len, c->mapoff))
-                  return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
-                int error = __nacl_irt_dyncode.dyncode_create
-                  ((void *) (l->l_addr + c->mapstart), data, len);
-                __munmap (data, len);
-                if (__glibc_unlikely (error))
-                  {
-                    errno = error;
-                    return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
-                  }
-              }
-            else
-              {
-                /* Allocate the pages.  */
-                if (__mmap ((void *) (l->l_addr + c->mapstart),
-                            c->mapend - c->mapstart, c->prot | PROT_WRITE,
-                            MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0)
-                    == MAP_FAILED)
-                  return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
-                /* Now read in the data.  */
-                if (read_in_data (fd, (void *) (l->l_addr + c->mapstart),
-                                  c->dataend - c->mapstart, c->mapoff))
-                  return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
-                /* Now that we've filled the pages, reset the page
-                   protections to what they should be.  */
-                if (!(c->prot & PROT_WRITE)
-                    && __mprotect ((void *) (l->l_addr + c->mapstart),
-                                   c->mapend - c->mapstart, c->prot) < 0)
-                  return DL_MAP_SEGMENTS_ERROR_MPROTECT;
-              }
-          }
-        else if (c->allocend > c->dataend)
-          {
-            /* Extra zero pages should appear at the end of this segment,
-               after the data mapped from the file.  */
+	    /* No mmap support for this file.  */
+	    if (c->prot & PROT_EXEC)
+	      {
+		/* Read the data into a temporary buffer.  */
+		const size_t len = c->mapend - c->mapstart;
+		void *data = __mmap (NULL, len, PROT_READ | PROT_WRITE,
+				     MAP_ANON|MAP_PRIVATE, -1, 0);
+		if (__glibc_unlikely (data == MAP_FAILED))
+		  return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
+		if (read_in_data (fd, data, len, c->mapoff))
+		  return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
+		/* Now validate and install the code.  */
+		int error = __nacl_irt_dyncode.dyncode_create
+		  ((void *) (l->l_addr + c->mapstart), data, len);
+		__munmap (data, len);
+		if (__glibc_unlikely (error))
+		  {
+		    errno = error;
+		    return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
+		  }
+	      }
+	    else
+	      {
+		/* Allocate the pages.  */
+		if (__mmap ((void *) (l->l_addr + c->mapstart),
+			    c->mapend - c->mapstart, c->prot | PROT_WRITE,
+			    MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0)
+		    == MAP_FAILED)
+		  return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
+		/* Now read in the data.  */
+		if (read_in_data (fd, (void *) (l->l_addr + c->mapstart),
+				  c->dataend - c->mapstart, c->mapoff))
+		  return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
+		/* Now that we've filled the pages, reset the page
+		   protections to what they should be.  */
+		if (!(c->prot & PROT_WRITE)
+		    && __mprotect ((void *) (l->l_addr + c->mapstart),
+				   c->mapend - c->mapstart, c->prot) < 0)
+		  return DL_MAP_SEGMENTS_ERROR_MPROTECT;
+	      }
+	  }
+	else if (c->allocend > c->dataend)
+	  {
+	    /* Extra zero pages should appear at the end of this segment,
+	       after the data mapped from the file.  */
 
-            uintptr_t allocend = c->mapend;
-            if (c->mapend > c->dataend)
-              {
-                /* The final data page was partial.  So we didn't map it in.
-                   Instead, we must allocate an anonymous page to fill.  */
-                if (c->prot & PROT_WRITE)
-                  /* Do the whole allocation right here.  */
-                  allocend = c->allocend;
-                if (__mmap ((void *) (l->l_addr + c->mapstart + maplen),
-                            allocend - (c->mapstart + maplen), c->prot,
-                            MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0)
-                    == MAP_FAILED)
-                  return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
-                if (read_in_data (fd,
-                                  (void *) (l->l_addr + c->mapstart + maplen),
-                                  c->dataend & (GLRO(dl_pagesize) - 1),
-                                  c->mapoff + maplen))
-                  return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
-                /* Now that we've filled the page, reset its
-                   protections to what they should be.  */
-                if (!(c->prot & PROT_WRITE)
-                    && __mprotect ((void *) (l->l_addr + c->mapstart + maplen),
-                                   c->mapend - (c->mapstart + maplen),
-                                   c->prot) < 0)
-                  return DL_MAP_SEGMENTS_ERROR_MPROTECT;
-              }
+	    uintptr_t allocend = c->mapend;
+	    if (c->mapend > c->dataend)
+	      {
+		/* The final data page was partial.  So we didn't map it in.
+		   Instead, we must allocate an anonymous page to fill.  */
+		if (c->prot & PROT_WRITE)
+		  /* Do the whole allocation right here.  */
+		  allocend = c->allocend;
+		if (__mmap ((void *) (l->l_addr + c->mapstart + maplen),
+			    allocend - (c->mapstart + maplen), c->prot,
+			    MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0)
+		    == MAP_FAILED)
+		  return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
+		if (read_in_data (fd,
+				  (void *) (l->l_addr + c->mapstart + maplen),
+				  c->dataend & (GLRO(dl_pagesize) - 1),
+				  c->mapoff + maplen))
+		  return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
+		/* Now that we've filled the page, reset its
+		   protections to what they should be.  */
+		if (!(c->prot & PROT_WRITE)
+		    && __mprotect ((void *) (l->l_addr + c->mapstart + maplen),
+				   c->mapend - (c->mapstart + maplen),
+				   c->prot) < 0)
+		  return DL_MAP_SEGMENTS_ERROR_MPROTECT;
+	      }
 
-            /* Now allocate the pure zero-fill pages.  */
-            if (allocend < c->allocend
-                && (__mmap ((void *) (l->l_addr + c->mapstart + allocend),
-                            c->allocend - (c->mapstart + allocend), c->prot,
-                            MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0)
-                    == MAP_FAILED))
-              return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
-          }
+	    /* Now allocate the pure zero-fill pages.  */
+	    if (allocend < c->allocend
+		&& (__mmap ((void *) (l->l_addr + c->mapstart + allocend),
+			    c->allocend - (c->mapstart + allocend), c->prot,
+			    MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0)
+		    == MAP_FAILED))
+	      return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
+	  }
 
-        _dl_postprocess_loadcmd (l, header, c);
+	_dl_postprocess_loadcmd (l, header, c);
       }
 
   /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
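
The comment added in the first _dl_map_segments hunk pins down why the two branches exist: when the leading load command is executable, the mapping address must come from NaCl's IRT allocate_code_data interface rather than plain mmap, and the patch passes it the code size, the code-to-data offset, and the page-rounded data size. The standalone sketch below exercises only that arithmetic; the load-command values and the 64 KiB page size are hypothetical, and ALIGN_UP is a local stand-in for the glibc-internal macro (equivalent for power-of-two alignments).

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for glibc's ALIGN_UP, valid for power-of-two ALIGN.  */
#define ALIGN_UP(x, align) (((x) + (align) - 1) & ~((uintptr_t) (align) - 1))

int
main (void)
{
  const uintptr_t pagesize = 0x10000;   /* Assumed 64 KiB NaCl page size.  */

  /* Hypothetical load commands: code segment at 0x0, data at 0x40000.  */
  const uintptr_t code_mapstart = 0x0;
  const uintptr_t code_allocend = 0x2b000;
  const uintptr_t data_mapstart = 0x40000;
  const size_t maplength = 0x55cf0;     /* Whole mapping, code through bss.  */

  /* The three sizes the patch hands to allocate_code_data.  */
  uintptr_t code_size = code_allocend - code_mapstart;       /* 0x2b000 */
  uintptr_t data_offset = data_mapstart - code_mapstart;     /* 0x40000 */
  size_t data_size = ALIGN_UP (maplength - data_offset, pagesize); /* 0x20000 */

  printf ("code_size=%#lx data_offset=%#lx data_size=%#lx\n",
          (unsigned long) code_size, (unsigned long) data_offset,
          (unsigned long) data_size);
  return 0;
}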
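
When mapping from the file fails with EINVAL, ENOTSUP, or ENOSYS, the patch reads the segment in by hand. For a non-executable segment that means: allocate the whole range as writable anonymous pages, read the file-backed bytes into it, then take PROT_WRITE away again if the segment is read-only. Here is a minimal sketch of the same pattern in plain POSIX terms, outside NaCl; the function name is illustrative, and it reports the truncated-file case as EIO where glibc's read_in_data uses the BSD EFTYPE.

#include <errno.h>
#include <stdbool.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

/* Returns true on failure, mirroring read_in_data's convention.  */
bool
load_noexec_segment (int fd, void *vaddr, size_t filesz, size_t memsz,
                     off_t offset, int prot)
{
  /* Allocate the whole segment as writable anonymous memory.  */
  if (mmap (vaddr, memsz, prot | PROT_WRITE,
            MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0) == MAP_FAILED)
    return true;

  /* Read the file-backed part; the tail stays zero-filled (bss).  */
  if (lseek (fd, offset, SEEK_SET) == (off_t) -1)
    return true;
  char *p = vaddr;
  size_t len = filesz;
  while (len > 0)
    {
      ssize_t n = read (fd, p, len);
      if (n < 0)
        return true;
      if (n == 0)
        {
          errno = EIO;  /* File shorter than the program header claims.  */
          return true;
        }
      p += n;
      len -= n;
    }

  /* The contents are in place; drop the temporary write permission.  */
  if (!(prot & PROT_WRITE) && mprotect (vaddr, memsz, prot) < 0)
    return true;
  return false;
}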
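
The subtlest arithmetic in the patch is at the data/bss boundary. Because NaCl's mmap does not reliably COW-fault the tail of the final partial page, maplen is rounded down so only whole pages come from the file; the page holding the boundary is allocated anonymously, and the leftover file bytes (dataend & (pagesize - 1)) are read into it. A worked example of that masking, with assumed addresses and an assumed 64 KiB page size:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  const uintptr_t pagesize = 0x10000;   /* Assumed page size.  */
  const uintptr_t mapstart = 0x20000;   /* Segment start (page-aligned).  */
  const uintptr_t dataend  = 0x35cf0;   /* End of file-backed data.  */
  const uintptr_t mapend   = 0x40000;   /* Page-rounded end of data.  */
  const uintptr_t allocend = 0x58000;   /* End of data plus bss.  */

  /* Whole pages that can be mapped straight from the file.  */
  uintptr_t maplen = (dataend & -pagesize) - mapstart;      /* 0x10000 */

  /* File bytes that land in the anonymously allocated partial page.  */
  uintptr_t tail = dataend & (pagesize - 1);                /* 0x5cf0 */

  printf ("mmap %#lx bytes from the file, read %#lx bytes into the partial\n"
          "page, zero-fill from %#lx up to %#lx\n",
          (unsigned long) maplen, (unsigned long) tail,
          (unsigned long) mapend, (unsigned long) allocend);

  /* Every byte of file data is accounted for exactly once.  */
  assert (mapstart + maplen + tail == dataend);
  return 0;
}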