diff --git a/bin/heroku-hhvm-apache2 b/bin/heroku-hhvm-apache2
index fb649e050..b5b8c84d3 100755
--- a/bin/heroku-hhvm-apache2
+++ b/bin/heroku-hhvm-apache2
@@ -232,8 +232,9 @@ httpd_config=$(php_passthrough "$httpd_config")
 # make a shared pipe; we'll write the name of the process that exits to it once that happens, and wait for that event below
 # this particular call works on Linux and Mac OS (will create a literal ".XXXXXX" on Mac, but that doesn't matter).
 wait_pipe=$(mktemp -t "heroku.waitpipe-$PORT.XXXXXX" -u)
-rm -rf $wait_pipe
+rm -f $wait_pipe
 mkfifo $wait_pipe
+exec 3<> $wait_pipe
 
 # trap SIGINT/SIGQUIT (ctrl+c or ctrl+\ on the console), SIGTERM, and EXIT (upon failure of any command due to set -e, or because of the exit 1 at the very end), kill subshell child processes, then subshells
 # 1) restore EXIT trap immediately, or the exit at the end of the line will trigger this trap again
@@ -241,24 +242,24 @@ mkfifo $wait_pipe
 # 3) kill child processes (that's the sub-shells); it's likely that some of them have already disappeared, so xarg || true it too and suppress "no such process" complaints by sending them to /dev/null
 # FIXME: this doesn't currently fire when the subshells themselves are terminated
 # TODO: for extra brownie points, move to a function and curry for each given signal, passing the signal in as an arg, so we can use different exit codes or messages
-trap 'trap - EXIT; echo "Going down, terminating child processes..." >&2; jobs -p | xargs -n1 pkill -TERM -P &> /dev/null || true; jobs -p | xargs -n1 kill -TERM 2> /dev/null || true; exit' SIGINT SIGQUIT SIGTERM EXIT
+trap 'trap - EXIT; echo "Going down, terminating child processes..." >&2; rm -f ${wait_pipe} || true; jobs -p | xargs -n1 pkill -TERM -P &> /dev/null || true; jobs -p | xargs -n1 kill -TERM 2> /dev/null || true; exit' SIGINT SIGQUIT SIGTERM EXIT
 
 # launch processes. all run using || true to prevent premature exit of the subshell (from set -e) regardless of exit status
 # after a subprocess terminates (because it was killed or because it crashed or because it quit voluntarily), we write the name to FD 3 (because programs could output something on FD 1 (STDOUT) or FD 2 (STDERR)) and send that to the shared pipe (mkfifo) above, and a read command further down waits for something to come in on the shared pipe
 
 # redirect logs to STDERR; write "tail ..." to the shared pipe if it exits
 [[ $verbose ]] && echo "Starting log redirection..." >&2
-( touch "${logs[@]}"; tail -qF -n 0 "${logs[@]}" 1>&2 || true; echo 'tail "${logs[@]}"' >&3; ) 3> $wait_pipe &
+( touch "${logs[@]}"; tail -qF -n 0 "${logs[@]}" 1>&2 || true; echo 'tail "${logs[@]}"' >&3; ) &
 # start HHVM; write "hhvm" to the shared pipe if it exits
 echo "Starting hhvm..." >&2
-( hhvm --mode server -vServer.Type=fastcgi -vServer.FileSocket=/tmp/heroku.fcgi.$PORT.sock -c "$php_config" || true; echo "hhvm" >&3; ) 3> $wait_pipe &
+( hhvm --mode server -vServer.Type=fastcgi -vServer.FileSocket=/tmp/heroku.fcgi.$PORT.sock -c "$php_config" || true; echo "hhvm" >&3; ) &
 
 # wait a few seconds for HHVM to finish initializing; otherwise an early request might break Apache with the FastCGI pipe not being ready
 # start apache; write "httpd" to the shared pipe if it exits
 echo "Starting httpd..." >&2
-( sleep 2; httpd -D NO_DETACH -c "Include $httpd_config" || true; echo "httpd" >&3; ) 3> $wait_pipe &
+( sleep 2; httpd -D NO_DETACH -c "Include $httpd_config" || true; echo "httpd" >&3; ) &
 
 # wait for something to come from the shared pipe, which means that the given process was killed or has failed
-read exitproc < $wait_pipe
+read exitproc <&3
 # we'll only reach this if one of the processes above has terminated
 echo "Process exited unexpectedly: $exitproc" >&2
diff --git a/bin/heroku-hhvm-nginx b/bin/heroku-hhvm-nginx
index 7b2511680..bc0d8760a 100755
--- a/bin/heroku-hhvm-nginx
+++ b/bin/heroku-hhvm-nginx
@@ -232,8 +232,9 @@ nginx_config=$(php_passthrough "$nginx_config")
 # make a shared pipe; we'll write the name of the process that exits to it once that happens, and wait for that event below
 # this particular call works on Linux and Mac OS (will create a literal ".XXXXXX" on Mac, but that doesn't matter).
 wait_pipe=$(mktemp -t "heroku.waitpipe-$PORT.XXXXXX" -u)
-rm -rf $wait_pipe
+rm -f $wait_pipe
 mkfifo $wait_pipe
+exec 3<> $wait_pipe
 
 # trap SIGINT/SIGQUIT (ctrl+c or ctrl+\ on the console), SIGTERM, and EXIT (upon failure of any command due to set -e, or because of the exit 1 at the very end), kill subshell child processes, then subshells
 # 1) restore EXIT trap immediately, or the exit at the end of the line will trigger this trap again
@@ -241,24 +242,24 @@ mkfifo $wait_pipe
 # 3) kill child processes (that's the sub-shells); it's likely that some of them have already disappeared, so xarg || true it too and suppress "no such process" complaints by sending them to /dev/null
 # FIXME: this doesn't currently fire when the subshells themselves are terminated
 # TODO: for extra brownie points, move to a function and curry for each given signal, passing the signal in as an arg, so we can use different exit codes or messages
-trap 'trap - EXIT; echo "Going down, terminating child processes..." >&2; jobs -p | xargs -n1 pkill -TERM -P &> /dev/null || true; jobs -p | xargs -n1 kill -TERM 2> /dev/null || true; exit' SIGINT SIGQUIT SIGTERM EXIT
+trap 'trap - EXIT; echo "Going down, terminating child processes..." >&2; rm -f ${wait_pipe} || true; jobs -p | xargs -n1 pkill -TERM -P &> /dev/null || true; jobs -p | xargs -n1 kill -TERM 2> /dev/null || true; exit' SIGINT SIGQUIT SIGTERM EXIT
 
 # launch processes. all run using || true to prevent premature exit of the subshell (from set -e) regardless of exit status
 # after a subprocess terminates (because it was killed or because it crashed or because it quit voluntarily), we write the name to FD 3 (because programs could output something on FD 1 (STDOUT) or FD 2 (STDERR)) and send that to the shared pipe (mkfifo) above, and a read command further down waits for something to come in on the shared pipe
 
 # redirect logs to STDERR; write "tail ..." to the shared pipe if it exits
 [[ $verbose ]] && echo "Starting log redirection..." >&2
-( touch "${logs[@]}"; tail -qF -n 0 "${logs[@]}" 1>&2 || true; echo 'tail "${logs[@]}"' >&3; ) 3> $wait_pipe &
+( touch "${logs[@]}"; tail -qF -n 0 "${logs[@]}" 1>&2 || true; echo 'tail "${logs[@]}"' >&3; ) &
 # start HHVM; write "hhvm" to the shared pipe if it exits
 echo "Starting hhvm..." >&2
-( hhvm --mode server -vServer.Type=fastcgi -vServer.FileSocket=/tmp/heroku.fcgi.$PORT.sock -c "$php_config" || true; echo "hhvm" >&3; ) 3> $wait_pipe &
+( hhvm --mode server -vServer.Type=fastcgi -vServer.FileSocket=/tmp/heroku.fcgi.$PORT.sock -c "$php_config" || true; echo "hhvm" >&3; ) &
 
 # wait a few seconds for HHVM to finish initializing; otherwise an early request might break nginx with the FastCGI pipe not being ready
 # start nginx; write "nginx" to the shared pipe if it exits
 echo "Starting nginx..." >&2
-( sleep 2; nginx -g "daemon off; include $nginx_config;" || true; echo "nginx" >&3; ) 3> $wait_pipe &
+( sleep 2; nginx -g "daemon off; include $nginx_config;" || true; echo "nginx" >&3; ) &
 
 # wait for something to come from the shared pipe, which means that the given process was killed or has failed
-read exitproc < $wait_pipe
+read exitproc <&3
 # we'll only reach this if one of the processes above has terminated
 echo "Process exited unexpectedly: $exitproc" >&2
diff --git a/bin/heroku-php-apache2 b/bin/heroku-php-apache2
index 4be0e9533..f49047686 100755
--- a/bin/heroku-php-apache2
+++ b/bin/heroku-php-apache2
@@ -273,8 +273,9 @@ httpd_config=$(php_passthrough "$httpd_config")
 # make a shared pipe; we'll write the name of the process that exits to it once that happens, and wait for that event below
 # this particular call works on Linux and Mac OS (will create a literal ".XXXXXX" on Mac, but that doesn't matter).
 wait_pipe=$(mktemp -t "heroku.waitpipe-$PORT.XXXXXX" -u)
-rm -rf $wait_pipe
+rm -f $wait_pipe
 mkfifo $wait_pipe
+exec 3<> $wait_pipe
 
 # trap SIGINT/SIGQUIT (ctrl+c or ctrl+\ on the console), SIGTERM, and EXIT (upon failure of any command due to set -e, or because of the exit 1 at the very end), kill subshell child processes, then subshells
 # 1) restore EXIT trap immediately, or the exit at the end of the line will trigger this trap again
@@ -282,25 +283,25 @@ mkfifo $wait_pipe
 # 3) kill child processes (that's the sub-shells); it's likely that some of them have already disappeared, so xarg || true it too and suppress "no such process" complaints by sending them to /dev/null
 # FIXME: this doesn't currently fire when the subshells themselves are terminated
 # TODO: for extra brownie points, move to a function and curry for each given signal, passing the signal in as an arg, so we can use different exit codes or messages
-trap 'trap - EXIT; echo "Going down, terminating child processes..." >&2; jobs -p | xargs -n1 pkill -TERM -P &> /dev/null || true; jobs -p | xargs -n1 kill -TERM 2> /dev/null || true; exit' SIGINT SIGQUIT SIGTERM EXIT
+trap 'trap - EXIT; echo "Going down, terminating child processes..." >&2; rm -f ${wait_pipe} || true; jobs -p | xargs -n1 pkill -TERM -P &> /dev/null || true; jobs -p | xargs -n1 kill -TERM 2> /dev/null || true; exit' SIGINT SIGQUIT SIGTERM EXIT
 
 # launch processes. all run using || true to prevent premature exit of the subshell (from set -e) regardless of exit status
 # after a subprocess terminates (because it was killed or because it crashed or because it quit voluntarily), we write the name to FD 3 (because programs could output something on FD 1 (STDOUT) or FD 2 (STDERR)) and send that to the shared pipe (mkfifo) above, and a read command further down waits for something to come in on the shared pipe
 
 # redirect logs to STDERR; write "tail ..." to the shared pipe if it exits
 [[ $verbose ]] && echo "Starting log redirection..." >&2
-( touch "${logs[@]}"; tail -qF -n 0 "${logs[@]}" | strip_fpm_child_said 1>&2 || true; echo 'tail "${logs[@]}"' >&3; ) 3> $wait_pipe &
+( touch "${logs[@]}"; tail -qF -n 0 "${logs[@]}" | strip_fpm_child_said 1>&2 || true; echo 'tail "${logs[@]}"' >&3; ) &
 # start FPM; write "php-fpm" to the shared pipe if it exits
 echo "Starting php-fpm..." >&2
 unset -f php-fpm # remove the alias we made earlier that would prevent newrelic from starting on php-fpm -v
-( php-fpm --nodaemonize -y "$fpm_config" -c "$php_config" || true; echo "php-fpm" >&3; ) 3> $wait_pipe &
+( php-fpm --nodaemonize -y "$fpm_config" -c "$php_config" || true; echo "php-fpm" >&3; ) &
 
 # wait a few seconds for FPM to finish initializing; otherwise an early request might break Apache with the FastCGI pipe not being ready
 # start apache; write "httpd" to the shared pipe if it exits
 echo "Starting httpd..." >&2
-( sleep 2; httpd -D NO_DETACH -c "Include $httpd_config" || true; echo "httpd" >&3; ) 3> $wait_pipe &
+( sleep 2; httpd -D NO_DETACH -c "Include $httpd_config" || true; echo "httpd" >&3; ) &
 
 # wait for something to come from the shared pipe, which means that the given process was killed or has failed
-read exitproc < $wait_pipe
+read exitproc <&3
 # we'll only reach this if one of the processes above has terminated
 echo "Process exited unexpectedly: $exitproc" >&2
diff --git a/bin/heroku-php-nginx b/bin/heroku-php-nginx
index 11365b488..e17bf3f73 100755
--- a/bin/heroku-php-nginx
+++ b/bin/heroku-php-nginx
@@ -273,8 +273,9 @@ nginx_config=$(php_passthrough "$nginx_config")
 # make a shared pipe; we'll write the name of the process that exits to it once that happens, and wait for that event below
 # this particular call works on Linux and Mac OS (will create a literal ".XXXXXX" on Mac, but that doesn't matter).
 wait_pipe=$(mktemp -t "heroku.waitpipe-$PORT.XXXXXX" -u)
-rm -rf $wait_pipe
+rm -f $wait_pipe
 mkfifo $wait_pipe
+exec 3<> $wait_pipe
 
 # trap SIGINT/SIGQUIT (ctrl+c or ctrl+\ on the console), SIGTERM, and EXIT (upon failure of any command due to set -e, or because of the exit 1 at the very end), kill subshell child processes, then subshells
 # 1) restore EXIT trap immediately, or the exit at the end of the line will trigger this trap again
@@ -282,25 +283,25 @@ mkfifo $wait_pipe
 # 3) kill child processes (that's the sub-shells); it's likely that some of them have already disappeared, so xarg || true it too and suppress "no such process" complaints by sending them to /dev/null
 # FIXME: this doesn't currently fire when the subshells themselves are terminated
 # TODO: for extra brownie points, move to a function and curry for each given signal, passing the signal in as an arg, so we can use different exit codes or messages
-trap 'trap - EXIT; echo "Going down, terminating child processes..." >&2; jobs -p | xargs -n1 pkill -TERM -P &> /dev/null || true; jobs -p | xargs -n1 kill -TERM 2> /dev/null || true; exit' SIGINT SIGQUIT SIGTERM EXIT
+trap 'trap - EXIT; echo "Going down, terminating child processes..." >&2; rm -f ${wait_pipe} || true; jobs -p | xargs -n1 pkill -TERM -P &> /dev/null || true; jobs -p | xargs -n1 kill -TERM 2> /dev/null || true; exit' SIGINT SIGQUIT SIGTERM EXIT
 
 # launch processes. all run using || true to prevent premature exit of the subshell (from set -e) regardless of exit status
 # after a subprocess terminates (because it was killed or because it crashed or because it quit voluntarily), we write the name to FD 3 (because programs could output something on FD 1 (STDOUT) or FD 2 (STDERR)) and send that to the shared pipe (mkfifo) above, and a read command further down waits for something to come in on the shared pipe
 
 # redirect logs to STDERR; write "tail ..." to the shared pipe if it exits
 [[ $verbose ]] && echo "Starting log redirection..." >&2
-( touch "${logs[@]}"; tail -qF -n 0 "${logs[@]}" | strip_fpm_child_said 1>&2 || true; echo 'tail "${logs[@]}"' >&3; ) 3> $wait_pipe &
+( touch "${logs[@]}"; tail -qF -n 0 "${logs[@]}" | strip_fpm_child_said 1>&2 || true; echo 'tail "${logs[@]}"' >&3; ) &
 # start FPM; write "php-fpm" to the shared pipe if it exits
 echo "Starting php-fpm..." >&2
 unset -f php-fpm # remove the alias we made earlier that would prevent newrelic from starting on php-fpm -v
-( php-fpm --nodaemonize -y "$fpm_config" -c "$php_config" || true; echo "php-fpm" >&3; ) 3> $wait_pipe &
+( php-fpm --nodaemonize -y "$fpm_config" -c "$php_config" || true; echo "php-fpm" >&3; ) &
 
 # wait a few seconds for FPM to finish initializing; otherwise an early request might break nginx with the FastCGI pipe not being ready
 # start nginx; write "nginx" to the shared pipe if it exits
 echo "Starting nginx..." >&2
-( sleep 2; nginx -g "daemon off; include $nginx_config;" || true; echo "nginx" >&3; ) 3> $wait_pipe &
+( sleep 2; nginx -g "daemon off; include $nginx_config;" || true; echo "nginx" >&3; ) &
 
 # wait for something to come from the shared pipe, which means that the given process was killed or has failed
-read exitproc < $wait_pipe
+read exitproc <&3
 # we'll only reach this if one of the processes above has terminated
 echo "Process exited unexpectedly: $exitproc" >&2