/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <fork.h>
#include <atomic.h>

/* Lock to protect allocation and deallocation of fork handlers.  */
int __fork_lock = LLL_LOCK_INITIALIZER;

/* Number of pre-allocated handler entries.  */
#define NHANDLER 48

/* Memory pool for fork handler structures.  */
static struct fork_handler_pool
{
  struct fork_handler_pool *next;
  struct fork_handler mem[NHANDLER];
} fork_handler_pool;

static struct fork_handler *
fork_handler_alloc (void)
{
  struct fork_handler_pool *runp = &fork_handler_pool;
  struct fork_handler *result = NULL;
  unsigned int i;

  do
    {
      /* Search for an empty entry.  A reference count of zero marks a
         slot that is free for reuse.  */
      for (i = 0; i < NHANDLER; ++i)
        if (runp->mem[i].refcntr == 0)
          goto found;
    }
  while ((runp = runp->next) != NULL);

  /* All pools are full; we have to allocate a new one.  */
  runp = (struct fork_handler_pool *) calloc (1, sizeof (*runp));
  if (runp != NULL)
    {
      /* Enqueue the new memory pool into the list.  */
      runp->next = fork_handler_pool.next;
      fork_handler_pool.next = runp;

      /* We use the last entry on the page.  This means when we start
         searching from the front the next time we will find the first
         entry unused.  */
      i = NHANDLER - 1;

    found:
      result = &runp->mem[i];
      result->refcntr = 1;
      result->need_signal = 0;
    }

  return result;
}
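
/* Release sketch (illustrative, not code from this file): a slot goes
   back to the pool simply by dropping its reference count to zero,
   which is the state fork_handler_alloc searches for; no free () is
   needed because the storage lives inside the pool.  The helper name
   below is hypothetical; in glibc the real decrement happens in
   __unregister_atfork (unregister-atfork.c):

     static void
     fork_handler_release (struct fork_handler *p)
     {
       atomic_decrement (&p->refcntr);
     }
*/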

int
__register_atfork (void (*prepare) (void), void (*parent) (void),
                   void (*child) (void), void *dso_handle)
{
  /* Get the lock to not conflict with other allocations.  */
  lll_lock (__fork_lock, LLL_PRIVATE);

  struct fork_handler *newp = fork_handler_alloc ();

  if (newp != NULL)
    {
      /* Initialize the new record.  */
      newp->prepare_handler = prepare;
      newp->parent_handler = parent;
      newp->child_handler = child;
      newp->dso_handle = dso_handle;

      __linkin_atfork (newp);
    }

  /* Release the lock.  */
  lll_unlock (__fork_lock, LLL_PRIVATE);

  return newp == NULL ? ENOMEM : 0;
}
libc_hidden_def (__register_atfork)
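
/* Usage sketch (illustrative, not part of this file): the public
   pthread_atfork entry point is a thin wrapper around
   __register_atfork; it passes the calling object's DSO handle so the
   handlers can be removed again when that object is unloaded.
   Roughly:

     int
     pthread_atfork (void (*prepare) (void), void (*parent) (void),
                     void (*child) (void))
     {
       return __register_atfork (prepare, parent, child,
                                 &__dso_handle == NULL
                                 ? NULL : __dso_handle);
     }
*/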

void
attribute_hidden
__linkin_atfork (struct fork_handler *newp)
{
  /* GRTE's patches for async-signal-safe TLS can cause a race
     condition in which ptmalloc_init is called from more than one
     thread.  (allocate_dtv normally calls calloc, which invokes
     ptmalloc_init via a hook while creating the first thread, but our
     code calls __signal_safe_calloc, which does not run hooks.)
     ptmalloc_init tries to be idempotent in case of multiple threads,
     but in glibc-2.19 it fills in atfork hooks from a global static
     atfork_mem that is not protected by any lock, which is a bad
     idea; it can result in the same allocated object being passed to
     this routine more than once.  This function would then set the
     object's next pointer to point to itself, resulting in a hang
     when the program tries to exit.

     This problem has been (indirectly) resolved in upstream glibc by
     rewriting the whole thing so that thread setup is not done with
     atforks or static variables, but the changes are extensive and
     would not backport reliably.  Our race is somewhat difficult to
     trigger - it requires a program to start creating threads
     *before* any kind of memory allocation whatsoever.  So given all
     this, the safest route is simply to detect when the fork handler
     is already present and skip adding it altogether.

     Note that it is conceivable that calls to pthread_atfork could
     leave the atfork_mem object somewhere other than the head of the
     list, but testing has been unable to produce such a case.  */
  struct fork_handler *scanp;
  for (scanp = __fork_handlers; scanp != NULL; scanp = scanp->next)
    if (newp == scanp)
      return;

  /* Push the new handler onto the head of the list.  If another
     thread updated __fork_handlers between the load and the
     compare-and-swap, retry with the new head.  */
  do
    newp->next = __fork_handlers;
  while (catomic_compare_and_exchange_bool_acq (&__fork_handlers,
                                                newp, newp->next) != 0);
}
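
/* The loop above is the standard lock-free push onto a singly linked
   list: publish the new node as the head only if the head has not
   changed since it was read.  A minimal standalone sketch of the same
   pattern using C11 atomics (an assumption for illustration; glibc
   itself uses the wrappers from its internal <atomic.h>):

     #include <stdatomic.h>

     struct node { struct node *next; };
     static _Atomic (struct node *) head;

     static void
     push (struct node *n)
     {
       n->next = atomic_load (&head);
       while (!atomic_compare_exchange_weak (&head, &n->next, n))
         ;  // on failure, n->next is refreshed with the current
            // head, so simply retrying is enough
     }
*/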

/* Run from __libc_freeres to release all dynamically allocated memory
   at process shutdown (useful for leak checkers).  */
libc_freeres_fn (free_mem)
{
  /* Get the lock to not conflict with running forks.  */
  lll_lock (__fork_lock, LLL_PRIVATE);

  /* No more fork handlers.  */
  __fork_handlers = NULL;

  /* Unhook any dynamically allocated memory blocks of the object
     pool; the static first pool is merely cleared.  */
  struct fork_handler_pool *runp = fork_handler_pool.next;
  memset (&fork_handler_pool, '\0', sizeof (fork_handler_pool));

  /* Release the lock.  */
  lll_unlock (__fork_lock, LLL_PRIVATE);

  /* We can free the memory after releasing the lock.  */
  while (runp != NULL)
    {
      struct fork_handler_pool *oldp = runp;
      runp = runp->next;
      free (oldp);
    }
}