diff --git a/wine-tkg-git/wine-tkg-patches/misc/fastsync/fastsync b/wine-tkg-git/wine-tkg-patches/misc/fastsync/fastsync index b8eb7b5c5..3b7df32c6 100644 --- a/wine-tkg-git/wine-tkg-patches/misc/fastsync/fastsync +++ b/wine-tkg-git/wine-tkg-patches/misc/fastsync/fastsync @@ -101,8 +101,10 @@ _patchname='ntsync-config.h.in-alt.patch' && _patchmsg="Using alternative config.h.in patchset for ntsync5" && nonuser_patcher fi if [ "$_protonify" = "true" ]; then - if ( cd "${srcdir}"/"${_winesrcdir}" && git merge-base --is-ancestor f41c434b88c63984082ad9d4627bef5d51434871 HEAD ); then + if ( cd "${srcdir}"/"${_winesrcdir}" && git merge-base --is-ancestor 1dfac2a252d0036c3bae08bf47f00582343a80fb HEAD ); then _patchname='ntsync5-staging-protonify.patch' && _patchmsg="Using ntsync patchset" && nonuser_patcher + elif ( cd "${srcdir}"/"${_winesrcdir}" && git merge-base --is-ancestor f41c434b88c63984082ad9d4627bef5d51434871 HEAD ); then + _patchname='ntsync5-staging-protonify-1dfac2a.patch' && _patchmsg="Using ntsync patchset" && nonuser_patcher elif ( cd "${srcdir}"/"${_winesrcdir}" && git merge-base --is-ancestor fd3de9005ef504a810aeb828c5b491a7bebd7888 HEAD ); then _patchname='ntsync5-staging-protonify-f41c434b.patch' && _patchmsg="Using ntsync patchset" && nonuser_patcher elif ( cd "${srcdir}"/"${_winesrcdir}" && git merge-base --is-ancestor 7eb72b7bb3d3ea771efddcb5273e8a69456548ff HEAD ); then @@ -111,8 +113,10 @@ _patchname='ntsync5-staging-protonify-7eb72b7b.patch' && _patchmsg="Using ntsync patchset" && nonuser_patcher fi else - if ( cd "${srcdir}"/"${_winesrcdir}" && git merge-base --is-ancestor f41c434b88c63984082ad9d4627bef5d51434871 HEAD ); then + if ( cd "${srcdir}"/"${_winesrcdir}" && git merge-base --is-ancestor 1dfac2a252d0036c3bae08bf47f00582343a80fb HEAD ); then _patchname='ntsync5-staging.patch' && _patchmsg="Using ntsync patchset" && nonuser_patcher + elif ( cd "${srcdir}"/"${_winesrcdir}" && git merge-base --is-ancestor f41c434b88c63984082ad9d4627bef5d51434871 HEAD ); then + _patchname='ntsync5-staging-1dfac2a.patch' && _patchmsg="Using ntsync patchset" && nonuser_patcher elif ( cd "${srcdir}"/"${_winesrcdir}" && git merge-base --is-ancestor fd3de9005ef504a810aeb828c5b491a7bebd7888 HEAD ); then _patchname='ntsync5-staging-f41c434b.patch' && _patchmsg="Using ntsync patchset" && nonuser_patcher elif ( cd "${srcdir}"/"${_winesrcdir}" && git merge-base --is-ancestor 7eb72b7bb3d3ea771efddcb5273e8a69456548ff HEAD ); then diff --git a/wine-tkg-git/wine-tkg-patches/misc/fastsync/legacy/ntsync5-staging-1dfac2a.patch b/wine-tkg-git/wine-tkg-patches/misc/fastsync/legacy/ntsync5-staging-1dfac2a.patch new file mode 100644 index 000000000..906acddc8 --- /dev/null +++ b/wine-tkg-git/wine-tkg-patches/misc/fastsync/legacy/ntsync5-staging-1dfac2a.patch @@ -0,0 +1,8290 @@ +diff --git a/README.esync b/README.esync +deleted file mode 100644 +index 11d8656..0000000 +--- a/README.esync ++++ /dev/null +@@ -1,196 +0,0 @@ +-This is eventfd-based synchronization, or 'esync' for short. Turn it on with +-WINEESYNC=1; debug it with +esync. +- +-== BUGS AND LIMITATIONS == +- +-Please let me know if you find any bugs. If you can, also attach a log with +-+seh,+pid,+esync,+server,+timestamp. +- +-If you get something like "eventfd: Too many open files" and then things start +-crashing, you've probably run out of file descriptors. esync creates one +-eventfd descriptor for each synchronization object, and some games may use a +-large number of these. 
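[Illustrative aside, not part of the patch itself: the descriptor limits discussed next can be inspected, and the soft limit raised, from plain C; everything in this demo is hypothetical scaffolding.]

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
    struct rlimit rl;

    if (getrlimit( RLIMIT_NOFILE, &rl )) { perror( "getrlimit" ); return 1; }
    printf( "soft fd limit: %llu, hard: %llu\n",
            (unsigned long long)rl.rlim_cur, (unsigned long long)rl.rlim_max );

    /* A process may raise its own soft limit up to the hard limit; raising
     * the hard limit needs limits.conf or systemd, as described below. */
    rl.rlim_cur = rl.rlim_max;
    if (setrlimit( RLIMIT_NOFILE, &rl )) perror( "setrlimit" );
    return 0;
}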
Linux by default limits a process to 4096 file +-descriptors, which probably was reasonable back in the nineties but isn't +-really anymore. (Fortunately Debian and derivatives [Ubuntu, Mint] already +-have a reasonable limit.) To raise the limit you'll want to edit +-/etc/security/limits.conf and add a line like +- +-* hard nofile 1048576 +- +-then restart your session. +- +-On distributions using systemd, the settings in `/etc/security/limits.conf` +-will be overridden by systemd's own settings. If you run `ulimit -Hn` and it +-returns a lower number than the one you've previously set, then you can set +- +-DefaultLimitNOFILE=1048576 +- +-in both `/etc/systemd/system.conf` and `/etc/systemd/user.conf`. You can then +-execute `sudo systemctl daemon-reexec` and restart your session. Check again +-with `ulimit -Hn` that the limit is correct. +- +-Also note that if the wineserver has esync active, all clients also must, and +-vice versa. Otherwise things will probably crash quite badly. +- +-== EXPLANATION == +- +-The aim is to execute all synchronization operations in "user-space", that is, +-without going through wineserver. We do this using Linux's eventfd +-facility. The main impetus to using eventfd is so that we can poll multiple +-objects at once; in particular we can't do this with futexes, or pthread +-semaphores, or the like. The only way I know of to wait on any of multiple +-objects is to use select/poll/epoll to wait on multiple fds, and eventfd gives +-us those fds in a quite usable way. +- +-Whenever a semaphore, event, or mutex is created, we have the server, instead +-of creating a traditional server-side event/semaphore/mutex, instead create an +-'esync' primitive. These live in esync.c and are very slim objects; in fact, +-they don't even know what type of primitive they are. The server is involved +-at all because we still need a way of creating named objects, passing handles +-to another process, etc. +- +-The server creates an eventfd file descriptor with the requested parameters +-and passes it back to ntdll. ntdll creates an object of the appropriate type, +-then caches it in a table. This table is copied almost wholesale from the fd +-cache code in server.c. +- +-Specific operations follow quite straightforwardly from eventfd: +- +-* To release an object, or set an event, we simply write() to it. +-* An object is signalled if read() succeeds on it. Notably, we create all +- eventfd descriptors with O_NONBLOCK, so that we can atomically check if an +- object is signalled and grab it if it is. This also lets us reset events. +-* For objects whose state should not be reset upon waiting—e.g. manual-reset +- events—we simply check for the POLLIN flag instead of reading. +-* Semaphores are handled by the EFD_SEMAPHORE flag. This matches up quite well +- (although with some difficulties; see below). +-* Mutexes store their owner thread locally. This isn't reliable information if +- a different process's thread owns the mutex, but this doesn't matter—a +- thread should only care whether it owns the mutex, so it knows whether to +- try waiting on it or simply to increase the recursion count. +- +-The interesting part about esync is that (almost) all waits happen in ntdll, +-including those on server-bound objects. The idea here is that on the server +-side, for any waitable object, we create an eventfd file descriptor (not an +-esync primitive), and then pass it to ntdll if the program tries to wait on +-it. 
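[Illustrative aside, not part of the patch: the eventfd semantics listed above can be demonstrated standalone. EFD_SEMAPHORE and EFD_NONBLOCK are the standard Linux flags; the rest is demo scaffolding.]

#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
    /* Semaphore-style eventfd: each read() takes one count and fails with
     * EAGAIN at zero, since the descriptor is non-blocking. */
    int fd = eventfd( 0, EFD_SEMAPHORE | EFD_NONBLOCK );
    uint64_t val = 2;
    struct pollfd pfd = { .fd = fd, .events = POLLIN };

    write( fd, &val, sizeof(val) );                         /* "release" twice */
    printf( "signaled: %d\n", poll( &pfd, 1, 0 ) );         /* 1: peek without consuming */
    printf( "read: %zd\n", read( fd, &val, sizeof(val) ) ); /* 8: atomically grab one count */
    printf( "read: %zd\n", read( fd, &val, sizeof(val) ) ); /* 8 */
    printf( "read: %zd\n", read( fd, &val, sizeof(val) ) ); /* -1 (EAGAIN): unsignaled again */
    close( fd );
    return 0;
}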
These are cached too, so only the first wait will require a round trip to +-the server. Then the server signals the file descriptor as appropriate, and +-thereby wakes up the client. So far this is implemented for processes, +-threads, message queues (difficult; see below), and device managers (necessary +-for drivers to work). All of these are necessarily server-bound, so we +-wouldn't really gain anything by signalling on the client side instead. Of +-course, except possibly for message queues, it's not likely that any program +-(cutting-edge D3D game or not) is going to be causing a great wineserver load +-by waiting on any of these objects; the motivation was rather to provide a way +-to wait on ntdll-bound and server-bound objects at the same time. +- +-Some cases are still passed to the server, and there's probably no reason not +-to keep them that way. Those that I noticed while testing include: async +-objects, which are internal to the file APIs and never exposed to userspace, +-startup_info objects, which are internal to the loader and signalled when a +-process starts, and keyed events, which are exposed through an ntdll API +-(although not through kernel32) but can't be mixed with other objects (you +-have to use NtWaitForKeyedEvent()). Other cases include: named pipes, debug +-events, sockets, and timers. It's unlikely we'll want to optimize debug events +-or sockets (or any of the other, rather rare, objects), but it is possible +-we'll want to optimize named pipes or timers. +- +-There were two sort of complications when working out the above. The first one +-was events. The trouble is that (1) the server actually creates some events by +-itself and (2) the server sometimes manipulates events passed by the +-client. Resolving the first case was easy enough, and merely entailed creating +-eventfd descriptors for the events the same way as for processes and threads +-(note that we don't really lose anything this way; the events include +-"LowMemoryCondition" and the event that signals system processes to shut +-down). For the second case I basically had to hook the server-side event +-functions to redirect to esync versions if the event was actually an esync +-primitive. +- +-The second complication was message queues. The difficulty here is that X11 +-signals events by writing into a pipe (at least I think it's a pipe?), and so +-as a result wineserver has to poll on that descriptor. In theory we could just +-let wineserver do so and then signal us as appropriate, except that wineserver +-only polls on the pipe when the thread is waiting for events (otherwise we'd +-get e.g. keyboard input while the thread is doing something else, and spin +-forever trying to wake up a thread that doesn't care). The obvious solution is +-just to poll on that fd ourselves, and that's what I did—it's just that +-getting the fd from wineserver was kind of ugly, and the code for waiting was +-also kind of ugly basically because we have to wait on both X11's fd and the +-"normal" process/thread-style wineserver fd that we use to signal sent +-messages. The upshot about the whole thing was that races are basically +-impossible, since a thread can only wait on its own queue. +- +-System APCs already work, since the server will forcibly suspend a thread if +-it's not already waiting, and so we just need to check for EINTR from +-poll(). 
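[Illustrative aside, not from the patch: the EINTR handling just mentioned reduces to a retry loop. The helper name is made up for this sketch; the do_poll() in the deleted esync.c further down implements the same idea, plus timeout recomputation.]

#include <errno.h>
#include <poll.h>

/* Retry poll() when a signal (e.g. SIGUSR1 delivering a system APC)
 * interrupts the wait. */
static int poll_eintr_retry( struct pollfd *fds, nfds_t nfds, int timeout )
{
    int ret;
    do
        ret = poll( fds, nfds, timeout );
    while (ret < 0 && errno == EINTR);
    return ret;
}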
User APCs and alertable waits are implemented in a similar style to +-message queues (well, sort of): whenever someone executes an alertable wait, +-we add an additional eventfd to the list, which the server signals when an APC +-arrives. If that eventfd gets signaled, we hand it off to the server to take +-care of, and return STATUS_USER_APC. +- +-Originally I kept the volatile state of semaphores and mutexes inside a +-variable local to the handle, with the knowledge that this would break if +-someone tried to open the handle elsewhere or duplicate it. It did, and so now +-this state is stored inside shared memory. This is of the POSIX variety, is +-allocated by the server (but never mapped there) and lives under the path +-"/wine-esync". +- +-There are a couple things that this infrastructure can't handle, although +-surprisingly there aren't that many. In particular: +-* Implementing wait-all, i.e. WaitForMultipleObjects(..., TRUE, ...), is not +- exactly possible the way we'd like it to be possible. In theory that +- function should wait until it knows all objects are available, then grab +- them all at once atomically. The server (like the kernel) can do this +- because the server is single-threaded and can't race with itself. We can't +- do this in ntdll, though. The approach I've taken I've laid out in great +- detail in the relevant patch, but for a quick summary we poll on each object +- until it's signaled (but don't grab it), check them all again, and if +- they're all signaled we try to grab them all at once in a tight loop, and if +- we fail on any of them we reset the count on whatever we shouldn't have +- consumed. Such a blip would necessarily be very quick. +-* The whole patchset only works on Linux, where eventfd is available. However, +- it should be possible to make it work on a Mac, since eventfd is just a +- quicker, easier way to use pipes (i.e. instead of writing 1 to the fd you'd +- write 1 byte; instead of reading a 64-bit value from the fd you'd read as +- many bytes as you can carry, which is admittedly less than 2**64 but +- can probably be something reasonable.) It's also possible, although I +- haven't yet looked, to use some different kind of synchronization +- primitives, but pipes would be easiest to tack onto this framework. +-* PulseEvent() can't work the way it's supposed to work. Fortunately it's rare +- and deprecated. It's also explicitly mentioned on MSDN that a thread can +- miss the notification for a kernel APC, so in a sense we're not necessarily +- doing anything wrong. +- +-There are some things that are perfectly implementable but that I just haven't +-done yet: +-* Other synchronizable server primitives. It's unlikely we'll need any of +- these, except perhaps named pipes (which would honestly be rather difficult) +- and (maybe) timers. +-* Access masks. We'd need to store these inside ntdll, and validate them when +- someone tries to execute esync operations. +- +-This patchset was inspired by Daniel Santos' "hybrid synchronization" +-patchset. My idea was to create a framework whereby even contended waits could +-be executed in userspace, eliminating a lot of the complexity that his +-synchronization primitives used. I do however owe some significant gratitude +-toward him for setting me on the right path. +- +-I've tried to maximize code separation, both to make any potential rebases +-easier and to ensure that esync is only active when configured. 
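[Illustrative aside, not a literal excerpt: schematically, the guard pattern described in the sentences that follow looks like the sketch below. do_esync() and esync_set_event() are real names from this patchset; server_set_event() is a hypothetical stand-in for the original server-based path.]

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winternl.h"
#include "esync.h"   /* declares do_esync() and the esync_* entry points */

NTSTATUS WINAPI NtSetEvent( HANDLE handle, LONG *prev_state )
{
    if (do_esync()) return esync_set_event( handle );  /* esync path, implemented in esync.c */
    return server_set_event( handle, prev_state );     /* hypothetical: original server round trip */
}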
All code in +-existing source files is guarded with "if (do_esync())", and generally that +-condition is followed by "return esync_version_of_this_method(...);", where +-the latter lives in esync.c and is declared in esync.h. I've also tried to +-make the patchset very clear and readable—to write it as if I were going to +-submit it upstream. (Some intermediate patches do break things, which Wine is +-generally against, but I think it's for the better in this case.) I have cut +-some corners, though; there is some error checking missing, or implicit +-assumptions that the program is behaving correctly. +- +-I've tried to be careful about races. There are a lot of comments whose +-purpose are basically to assure me that races are impossible. In most cases we +-don't have to worry about races since all of the low-level synchronization is +-done by the kernel. +- +-Anyway, yeah, this is esync. Use it if you like. +- +---Zebediah Figura +diff --git a/configure b/configure +index 4d13060..58291ba 100755 +--- a/configure ++++ b/configure +@@ -8110,6 +8110,12 @@ if test "x$ac_cv_header_linux_major_h" = xyes + then : + printf "%s\n" "#define HAVE_LINUX_MAJOR_H 1" >>confdefs.h + ++fi ++ac_fn_c_check_header_compile "$LINENO" "linux/ntsync.h" "ac_cv_header_linux_ntsync_h" "$ac_includes_default" ++if test "x$ac_cv_header_linux_ntsync_h" = xyes ++then : ++ printf "%s\n" "#define HAVE_LINUX_NTSYNC_H 1" >>confdefs.h ++ + fi + ac_fn_c_check_header_compile "$LINENO" "linux/param.h" "ac_cv_header_linux_param_h" "$ac_includes_default" + if test "x$ac_cv_header_linux_param_h" = xyes +diff --git a/configure.ac b/configure.ac +index a3c414d..5af9132 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -403,6 +403,7 @@ AC_CHECK_HEADERS(\ + linux/input.h \ + linux/ioctl.h \ + linux/major.h \ ++ linux/ntsync.h \ + linux/param.h \ + linux/seccomp.h \ + linux/serial.h \ +@@ -433,7 +434,6 @@ AC_CHECK_HEADERS(\ + sys/cdio.h \ + sys/epoll.h \ + sys/event.h \ +- sys/eventfd.h \ + sys/extattr.h \ + sys/filio.h \ + sys/ipc.h \ +@@ -2094,7 +2094,6 @@ AC_CHECK_FUNCS(\ + port_create \ + posix_fadvise \ + posix_fallocate \ +- ppoll \ + prctl \ + proc_pidinfo \ + sched_yield \ +@@ -2120,12 +2119,6 @@ case $host_os in + ;; + esac + +-ac_save_LIBS=$LIBS +-AC_SEARCH_LIBS(shm_open, rt, +- [AC_DEFINE(HAVE_SHM_OPEN, 1, [Define to 1 if you have the `shm_open' function.]) +- test "$ac_res" = "none required" || AC_SUBST(RT_LIBS,"$ac_res")]) +-LIBS=$ac_save_LIBS +- + AC_CACHE_CHECK([for sched_setaffinity],wine_cv_have_sched_setaffinity, + AC_LINK_IFELSE([AC_LANG_PROGRAM( + [[#include ]], [[sched_setaffinity(0, 0, 0);]])],[wine_cv_have_sched_setaffinity=yes],[wine_cv_have_sched_setaffinity=no])) +diff --git a/dlls/kernel32/tests/sync.c b/dlls/kernel32/tests/sync.c +index 424cbe2..50e81aa 100644 +--- a/dlls/kernel32/tests/sync.c ++++ b/dlls/kernel32/tests/sync.c +@@ -57,7 +57,6 @@ static BOOLEAN (WINAPI *pTryAcquireSRWLockShared)(PSRWLOCK); + + static NTSTATUS (WINAPI *pNtAllocateVirtualMemory)(HANDLE, PVOID *, ULONG_PTR, SIZE_T *, ULONG, ULONG); + static NTSTATUS (WINAPI *pNtFreeVirtualMemory)(HANDLE, PVOID *, SIZE_T *, ULONG); +-static NTSTATUS (WINAPI *pNtQuerySystemTime)(LARGE_INTEGER *); + static NTSTATUS (WINAPI *pNtWaitForSingleObject)(HANDLE, BOOLEAN, const LARGE_INTEGER *); + static NTSTATUS (WINAPI *pNtWaitForMultipleObjects)(ULONG,const HANDLE*,BOOLEAN,BOOLEAN,const LARGE_INTEGER*); + static PSLIST_ENTRY (__fastcall *pRtlInterlockedPushListSList)(PSLIST_HEADER list, PSLIST_ENTRY first, +@@ -228,23 +227,8 @@ static void 
test_temporary_objects(void) + ok(GetLastError() == ERROR_FILE_NOT_FOUND, "wrong error %lu\n", GetLastError()); + } + +-static HANDLE mutex, mutex2, mutices[2]; +- +-static DWORD WINAPI mutex_thread( void *param ) +-{ +- DWORD expect = (DWORD)(DWORD_PTR)param; +- DWORD ret; +- +- ret = WaitForSingleObject( mutex, 0 ); +- ok(ret == expect, "expected %lu, got %lu\n", expect, ret); +- +- if (!ret) ReleaseMutex( mutex ); +- return 0; +-} +- + static void test_mutex(void) + { +- HANDLE thread; + DWORD wait_ret; + BOOL ret; + HANDLE hCreated; +@@ -284,8 +268,7 @@ static void test_mutex(void) + SetLastError(0xdeadbeef); + hOpened = OpenMutexA(GENERIC_READ | GENERIC_WRITE, FALSE, "WineTestMutex"); + ok(hOpened != NULL, "OpenMutex failed with error %ld\n", GetLastError()); +- wait_ret = WaitForSingleObject(hOpened, 0); +-todo_wine_if(getenv("WINEESYNC")) /* XFAIL: validation is not implemented */ ++ wait_ret = WaitForSingleObject(hOpened, INFINITE); + ok(wait_ret == WAIT_FAILED, "WaitForSingleObject succeeded\n"); + CloseHandle(hOpened); + +@@ -316,7 +299,6 @@ todo_wine_if(getenv("WINEESYNC")) /* XFAIL: validation is not implemented */ + + SetLastError(0xdeadbeef); + ret = ReleaseMutex(hCreated); +-todo_wine_if(getenv("WINEESYNC")) /* XFAIL: due to the above */ + ok(!ret && (GetLastError() == ERROR_NOT_OWNER), + "ReleaseMutex should have failed with ERROR_NOT_OWNER instead of %ld\n", GetLastError()); + +@@ -355,85 +337,6 @@ todo_wine_if(getenv("WINEESYNC")) /* XFAIL: due to the above */ + CloseHandle(hOpened); + + CloseHandle(hCreated); +- +- mutex = CreateMutexA( NULL, FALSE, NULL ); +- ok(!!mutex, "got error %lu\n", GetLastError()); +- +- ret = ReleaseMutex( mutex ); +- ok(!ret, "got %d\n", ret); +- ok(GetLastError() == ERROR_NOT_OWNER, "got error %lu\n", GetLastError()); +- +- for (i = 0; i < 100; i++) +- { +- ret = WaitForSingleObject( mutex, 0 ); +- ok(ret == 0, "got %u\n", ret); +- } +- +- for (i = 0; i < 100; i++) +- { +- ret = ReleaseMutex( mutex ); +- ok(ret, "got error %lu\n", GetLastError()); +- } +- +- ret = ReleaseMutex( mutex ); +- ok(!ret, "got %d\n", ret); +- ok(GetLastError() == ERROR_NOT_OWNER, "got error %lu\n", GetLastError()); +- +- thread = CreateThread( NULL, 0, mutex_thread, (void *)0, 0, NULL ); +- ret = WaitForSingleObject( thread, 2000 ); +- ok(ret == 0, "wait failed: %u\n", ret); +- +- WaitForSingleObject( mutex, 0 ); +- +- thread = CreateThread( NULL, 0, mutex_thread, (void *)WAIT_TIMEOUT, 0, NULL ); +- ret = WaitForSingleObject( thread, 2000 ); +- ok(ret == 0, "wait failed: %u\n", ret); +- +- ret = ReleaseMutex( mutex ); +- ok(ret, "got error %lu\n", GetLastError()); +- +- thread = CreateThread( NULL, 0, mutex_thread, (void *)0, 0, NULL ); +- ret = WaitForSingleObject( thread, 2000 ); +- ok(ret == 0, "wait failed: %u\n", ret); +- +- mutex2 = CreateMutexA( NULL, TRUE, NULL ); +- ok(!!mutex2, "got error %lu\n", GetLastError()); +- +- ret = ReleaseMutex( mutex2 ); +- ok(ret, "got error %lu\n", GetLastError()); +- +- ret = ReleaseMutex( mutex2 ); +- ok(!ret, "got %d\n", ret); +- ok(GetLastError() == ERROR_NOT_OWNER, "got error %lu\n", GetLastError()); +- +- mutices[0] = mutex; +- mutices[1] = mutex2; +- +- ret = WaitForMultipleObjects( 2, mutices, FALSE, 0 ); +- ok(ret == 0, "got %u\n", ret); +- +- ret = ReleaseMutex( mutex ); +- ok(ret, "got error %lu\n", GetLastError()); +- +- ret = ReleaseMutex( mutex2 ); +- ok(!ret, "got %d\n", ret); +- ok(GetLastError() == ERROR_NOT_OWNER, "got error %lu\n", GetLastError()); +- +- ret = WaitForMultipleObjects( 2, mutices, TRUE, 0 ); 
+- ok(ret == 0, "got %u\n", ret); +- +- ret = ReleaseMutex( mutex ); +- ok(ret, "got error %lu\n", GetLastError()); +- +- ret = ReleaseMutex( mutex2 ); +- ok(ret, "got error %lu\n", GetLastError()); +- +- ret = CloseHandle( mutex ); +- ok(ret, "got error %lu\n", GetLastError()); +- +- ret = CloseHandle( mutex2 ); +- ok(ret, "got error %lu\n", GetLastError()); +- + } + + static void test_slist(void) +@@ -609,13 +512,12 @@ static void test_slist(void) + + static void test_event(void) + { +- HANDLE handle, handle2, handles[2]; ++ HANDLE handle, handle2; + SECURITY_ATTRIBUTES sa; + SECURITY_DESCRIPTOR sd; + ACL acl; + DWORD ret; + BOOL val; +- int i; + + /* no sd */ + handle = CreateEventA(NULL, FALSE, FALSE, __FILE__ ": Test Event"); +@@ -719,130 +621,11 @@ static void test_event(void) + ok( ret, "QueryMemoryResourceNotification failed err %lu\n", GetLastError() ); + ok( val == FALSE || val == TRUE, "wrong value %u\n", val ); + CloseHandle( handle ); +- +- handle = CreateEventA( NULL, TRUE, FALSE, NULL ); +- ok(!!handle, "got error %lu\n", GetLastError()); +- +- ret = WaitForSingleObject( handle, 0 ); +- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); +- +- ret = SetEvent( handle ); +- ok(ret, "got error %lu\n", GetLastError()); +- +- ret = SetEvent( handle ); +- ok(ret, "got error %lu\n", GetLastError()); +- +- for (i = 0; i < 100; i++) +- { +- ret = WaitForSingleObject( handle, 0 ); +- ok(ret == 0, "got %lu\n", ret); +- } +- +- ret = ResetEvent( handle ); +- ok(ret, "got error %lu\n", GetLastError()); +- +- ret = ResetEvent( handle ); +- ok(ret, "got error %lu\n", GetLastError()); +- +- ret = WaitForSingleObject( handle, 0 ); +- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); +- +- handle2 = CreateEventA( NULL, FALSE, TRUE, NULL ); +- ok(!!handle2, "got error %lu\n", GetLastError()); +- +- ret = WaitForSingleObject( handle2, 0 ); +- ok(ret == 0, "got %lu\n", ret); +- +- ret = WaitForSingleObject( handle2, 0 ); +- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); +- +- ret = SetEvent( handle2 ); +- ok(ret, "got error %lu\n", GetLastError()); +- +- ret = SetEvent( handle2 ); +- ok(ret, "got error %lu\n", GetLastError()); +- +- ret = ResetEvent( handle2 ); +- ok(ret, "got error %lu\n", GetLastError()); +- +- ret = ResetEvent( handle2 ); +- ok(ret, "got error %lu\n", GetLastError()); +- +- ret = WaitForSingleObject( handle2, 0 ); +- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); +- +- handles[0] = handle; +- handles[1] = handle2; +- +- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); +- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); +- +- SetEvent( handle ); +- SetEvent( handle2 ); +- +- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); +- ok(ret == 0, "got %lu\n", ret); +- +- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); +- ok(ret == 0, "got %lu\n", ret); +- +- ret = WaitForSingleObject( handle2, 0 ); +- ok(ret == 0, "got %lu\n", ret); +- +- ResetEvent( handle ); +- SetEvent( handle2 ); +- +- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); +- ok(ret == 1, "got %lu\n", ret); +- +- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); +- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); +- +- SetEvent( handle ); +- SetEvent( handle2 ); +- +- ret = WaitForMultipleObjects( 2, handles, TRUE, 0 ); +- ok(ret == 0, "got %lu\n", ret); +- +- ret = WaitForMultipleObjects( 2, handles, TRUE, 0 ); +- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); +- +- SetEvent( handle2 ); +- ResetEvent( handle ); +- +- ret = WaitForMultipleObjects( 2, handles, TRUE, 0 ); +- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); +- +- ret = 
WaitForSingleObject( handle2, 0 ); +- ok(ret == 0, "got %lu\n", ret); +- +- handles[0] = handle2; +- handles[1] = handle; +- SetEvent( handle ); +- SetEvent( handle2 ); +- +- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); +- ok(ret == 0, "got %lu\n", ret); +- +- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); +- ok(ret == 1, "got %lu\n", ret); +- +- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); +- ok(ret == 1, "got %lu\n", ret); +- +- ret = CloseHandle( handle ); +- ok(ret, "got error %lu\n", GetLastError()); +- +- ret = CloseHandle( handle2 ); +- ok(ret, "got error %lu\n", GetLastError()); + } + + static void test_semaphore(void) + { +- HANDLE handle, handle2, handles[2]; +- DWORD ret; +- LONG prev; +- int i; ++ HANDLE handle, handle2; + + /* test case sensitivity */ + +@@ -884,99 +667,6 @@ static void test_semaphore(void) + ok( GetLastError() == ERROR_INVALID_PARAMETER, "wrong error %lu\n", GetLastError()); + + CloseHandle( handle ); +- +- handle = CreateSemaphoreA( NULL, 0, 5, NULL ); +- ok(!!handle, "CreateSemaphore failed: %lu\n", GetLastError()); +- +- ret = WaitForSingleObject( handle, 0 ); +- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); +- +- ret = ReleaseSemaphore( handle, 1, &prev ); +- ok(ret, "got error %lu\n", GetLastError()); +- ok(prev == 0, "got prev %ld\n", prev); +- +- ret = ReleaseSemaphore( handle, 1, &prev ); +- ok(ret, "got error %lu\n", GetLastError()); +- ok(prev == 1, "got prev %ld\n", prev); +- +- ret = ReleaseSemaphore( handle, 5, &prev ); +- ok(!ret, "got %ld\n", ret); +- ok(GetLastError() == ERROR_TOO_MANY_POSTS, "got error %lu\n", GetLastError()); +- ok(prev == 1, "got prev %ld\n", prev); +- +- ret = ReleaseSemaphore( handle, 2, &prev ); +- ok(ret, "got error %lu\n", GetLastError()); +- ok(prev == 2, "got prev %ld\n", prev); +- +- ret = ReleaseSemaphore( handle, 1, &prev ); +- ok(ret, "got error %lu\n", GetLastError()); +- ok(prev == 4, "got prev %ld\n", prev); +- +- for (i = 0; i < 5; i++) +- { +- ret = WaitForSingleObject( handle, 0 ); +- ok(ret == 0, "got %lu\n", ret); +- } +- +- ret = WaitForSingleObject( handle, 0 ); +- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); +- +- handle2 = CreateSemaphoreA( NULL, 3, 5, NULL ); +- ok(!!handle2, "CreateSemaphore failed: %lu\n", GetLastError()); +- +- ret = ReleaseSemaphore( handle2, 1, &prev ); +- ok(ret, "got error %lu\n", GetLastError()); +- ok(prev == 3, "got prev %ld\n", prev); +- +- for (i = 0; i < 4; i++) +- { +- ret = WaitForSingleObject( handle2, 0 ); +- ok(ret == 0, "got %lu\n", ret); +- } +- +- ret = WaitForSingleObject( handle2, 0 ); +- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); +- +- handles[0] = handle; +- handles[1] = handle2; +- +- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); +- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); +- +- ReleaseSemaphore( handle, 1, NULL ); +- ReleaseSemaphore( handle2, 1, NULL ); +- +- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); +- ok(ret == 0, "got %lu\n", ret); +- +- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); +- ok(ret == 1, "got %lu\n", ret); +- +- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); +- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); +- +- ReleaseSemaphore( handle, 1, NULL ); +- ReleaseSemaphore( handle2, 1, NULL ); +- +- ret = WaitForMultipleObjects( 2, handles, TRUE, 0 ); +- ok(ret == 0, "got %lu\n", ret); +- +- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); +- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); +- +- ReleaseSemaphore( handle, 1, NULL ); +- +- ret = WaitForMultipleObjects( 2, handles, TRUE, 0 ); +- 
ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); +- +- ret = WaitForSingleObject( handle, 0 ); +- ok(ret == 0, "got %lu\n", ret); +- +- ret = CloseHandle( handle ); +- ok(ret, "got error %lu\n", ret); +- +- ret = CloseHandle( handle2 ); +- ok(ret, "got error %lu\n", ret); + } + + static void test_waitable_timer(void) +@@ -1531,15 +1221,11 @@ static HANDLE modify_handle(HANDLE handle, DWORD modify) + return ULongToHandle(tmp); + } + +-#define TIMEOUT_INFINITE (((LONGLONG)0x7fffffff) << 32 | 0xffffffff) +- + static void test_WaitForSingleObject(void) + { + HANDLE signaled, nonsignaled, invalid; +- LARGE_INTEGER ntnow, ntthen; + LARGE_INTEGER timeout; + NTSTATUS status; +- DWORD now, then; + DWORD ret; + + signaled = CreateEventW(NULL, TRUE, TRUE, NULL); +@@ -1624,68 +1310,6 @@ static void test_WaitForSingleObject(void) + status = pNtWaitForSingleObject(GetCurrentThread(), FALSE, &timeout); + ok(status == STATUS_TIMEOUT, "expected STATUS_TIMEOUT, got %08lx\n", status); + +- ret = WaitForSingleObject( signaled, 0 ); +- ok(ret == 0, "got %lu\n", ret); +- +- ret = WaitForSingleObject( nonsignaled, 0 ); +- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); +- +- /* test that a timed wait actually does wait */ +- now = GetTickCount(); +- ret = WaitForSingleObject( nonsignaled, 100 ); +- then = GetTickCount(); +- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); +- ok(abs((then - now) - 100) < 5, "got %lu ms\n", then - now); +- +- now = GetTickCount(); +- ret = WaitForSingleObject( signaled, 100 ); +- then = GetTickCount(); +- ok(ret == 0, "got %lu\n", ret); +- ok(abs(then - now) < 5, "got %lu ms\n", then - now); +- +- ret = WaitForSingleObject( signaled, INFINITE ); +- ok(ret == 0, "got %lu\n", ret); +- +- /* test NT timeouts */ +- pNtQuerySystemTime( &ntnow ); +- timeout.QuadPart = ntnow.QuadPart + 100 * 10000; +- status = pNtWaitForSingleObject( nonsignaled, FALSE, &timeout ); +- pNtQuerySystemTime( &ntthen ); +- ok(status == STATUS_TIMEOUT, "got %#lx\n", status); +- ok(abs(((ntthen.QuadPart - ntnow.QuadPart) / 10000) - 100) < 5, "got %s ns\n", +- wine_dbgstr_longlong((ntthen.QuadPart - ntnow.QuadPart) * 100)); +- +- pNtQuerySystemTime( &ntnow ); +- timeout.QuadPart = -100 * 10000; +- status = pNtWaitForSingleObject( nonsignaled, FALSE, &timeout ); +- pNtQuerySystemTime( &ntthen ); +- ok(status == STATUS_TIMEOUT, "got %#lx\n", status); +- ok(abs(((ntthen.QuadPart - ntnow.QuadPart) / 10000) - 100) < 5, "got %s ns\n", +- wine_dbgstr_longlong((ntthen.QuadPart - ntnow.QuadPart) * 100)); +- +- status = pNtWaitForSingleObject( signaled, FALSE, NULL ); +- ok(status == 0, "got %#lx\n", status); +- +- timeout.QuadPart = TIMEOUT_INFINITE; +- status = pNtWaitForSingleObject( signaled, FALSE, &timeout ); +- ok(status == 0, "got %#lx\n", status); +- +- pNtQuerySystemTime( &ntnow ); +- timeout.QuadPart = ntnow.QuadPart; +- status = pNtWaitForSingleObject( nonsignaled, FALSE, &timeout ); +- pNtQuerySystemTime( &ntthen ); +- ok(status == STATUS_TIMEOUT, "got %#lx\n", status); +- ok(abs((ntthen.QuadPart - ntnow.QuadPart) / 10000) < 5, "got %s ns\n", +- wine_dbgstr_longlong((ntthen.QuadPart - ntnow.QuadPart) * 100)); +- +- pNtQuerySystemTime( &ntnow ); +- timeout.QuadPart = ntnow.QuadPart - 100 * 10000; +- status = pNtWaitForSingleObject( nonsignaled, FALSE, &timeout ); +- pNtQuerySystemTime( &ntthen ); +- ok(status == STATUS_TIMEOUT, "got %#lx\n", status); +- ok(abs((ntthen.QuadPart - ntnow.QuadPart) / 10000) < 5, "got %s ns\n", +- wine_dbgstr_longlong((ntthen.QuadPart - ntnow.QuadPart) * 100)); +- + CloseHandle(signaled); + 
CloseHandle(nonsignaled); + } +@@ -3351,7 +2975,6 @@ START_TEST(sync) + pTryAcquireSRWLockShared = (void *)GetProcAddress(hdll, "TryAcquireSRWLockShared"); + pNtAllocateVirtualMemory = (void *)GetProcAddress(hntdll, "NtAllocateVirtualMemory"); + pNtFreeVirtualMemory = (void *)GetProcAddress(hntdll, "NtFreeVirtualMemory"); +- pNtQuerySystemTime = (void *)GetProcAddress(hntdll, "NtQuerySystemTime"); + pNtWaitForSingleObject = (void *)GetProcAddress(hntdll, "NtWaitForSingleObject"); + pNtWaitForMultipleObjects = (void *)GetProcAddress(hntdll, "NtWaitForMultipleObjects"); + pRtlInterlockedPushListSList = (void *)GetProcAddress(hntdll, "RtlInterlockedPushListSList"); +diff --git a/dlls/ntdll/Makefile.in b/dlls/ntdll/Makefile.in +index 4629457..f71f79b 100644 +--- a/dlls/ntdll/Makefile.in ++++ b/dlls/ntdll/Makefile.in +@@ -48,7 +48,6 @@ SOURCES = \ + unix/cdrom.c \ + unix/debug.c \ + unix/env.c \ +- unix/esync.c \ + unix/file.c \ + unix/loader.c \ + unix/loadorder.c \ +diff --git a/dlls/ntdll/unix/esync.c b/dlls/ntdll/unix/esync.c +deleted file mode 100644 +index edfeb03..0000000 +--- a/dlls/ntdll/unix/esync.c ++++ /dev/null +@@ -1,1325 +0,0 @@ +-/* +- * eventfd-based synchronization objects +- * +- * Copyright (C) 2018 Zebediah Figura +- * +- * This library is free software; you can redistribute it and/or +- * modify it under the terms of the GNU Lesser General Public +- * License as published by the Free Software Foundation; either +- * version 2.1 of the License, or (at your option) any later version. +- * +- * This library is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * Lesser General Public License for more details. 
+- * +- * You should have received a copy of the GNU Lesser General Public +- * License along with this library; if not, write to the Free Software +- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA +- */ +- +-#if 0 +-#pragma makedep unix +-#endif +- +-#ifndef _GNU_SOURCE +-#define _GNU_SOURCE +-#endif +- +-#include "config.h" +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#ifdef HAVE_SYS_STAT_H +-# include +-#endif +-#include +-#include +-#include +- +-#include "ntstatus.h" +-#define WIN32_NO_STATUS +-#include "windef.h" +-#include "winternl.h" +-#include "wine/server.h" +-#include "wine/debug.h" +- +-#include "unix_private.h" +-#include "esync.h" +- +-WINE_DEFAULT_DEBUG_CHANNEL(esync); +- +-int do_esync(void) +-{ +-#ifdef HAVE_SYS_EVENTFD_H +- static int do_esync_cached = -1; +- +- if (do_esync_cached == -1) +- do_esync_cached = getenv("WINEESYNC") && atoi(getenv("WINEESYNC")); +- +- return do_esync_cached; +-#else +- static int once; +- if (!once++) +- FIXME("eventfd not supported on this platform.\n"); +- return 0; +-#endif +-} +- +-struct esync +-{ +- enum esync_type type; +- int fd; +- void *shm; +-}; +- +-struct semaphore +-{ +- int max; +- int count; +-}; +-C_ASSERT(sizeof(struct semaphore) == 8); +- +-struct mutex +-{ +- DWORD tid; +- int count; /* recursion count */ +-}; +-C_ASSERT(sizeof(struct mutex) == 8); +- +-struct event +-{ +- int signaled; +- int locked; +-}; +-C_ASSERT(sizeof(struct event) == 8); +- +-static char shm_name[29]; +-static int shm_fd; +-static void **shm_addrs; +-static int shm_addrs_size; /* length of the allocated shm_addrs array */ +-static long pagesize; +- +-static pthread_mutex_t shm_addrs_mutex = PTHREAD_MUTEX_INITIALIZER; +- +-static void *get_shm( unsigned int idx ) +-{ +- int entry = (idx * 8) / pagesize; +- int offset = (idx * 8) % pagesize; +- void *ret; +- +- pthread_mutex_lock( &shm_addrs_mutex ); +- +- if (entry >= shm_addrs_size) +- { +- int new_size = max(shm_addrs_size * 2, entry + 1); +- +- if (!(shm_addrs = realloc( shm_addrs, new_size * sizeof(shm_addrs[0]) ))) +- ERR("Failed to grow shm_addrs array to size %d.\n", shm_addrs_size); +- memset( shm_addrs + shm_addrs_size, 0, (new_size - shm_addrs_size) * sizeof(shm_addrs[0]) ); +- shm_addrs_size = new_size; +- } +- +- if (!shm_addrs[entry]) +- { +- void *addr = mmap( NULL, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, entry * pagesize ); +- if (addr == (void *)-1) +- ERR("Failed to map page %d (offset %#lx).\n", entry, entry * pagesize); +- +- TRACE("Mapping page %d at %p.\n", entry, addr); +- +- if (InterlockedCompareExchangePointer( &shm_addrs[entry], addr, 0 )) +- munmap( addr, pagesize ); /* someone beat us to it */ +- } +- +- ret = (void *)((unsigned long)shm_addrs[entry] + offset); +- +- pthread_mutex_unlock( &shm_addrs_mutex ); +- +- return ret; +-} +- +-/* We'd like lookup to be fast. To that end, we use a static list indexed by handle. +- * This is copied and adapted from the fd cache code. 
*/ +- +-#define ESYNC_LIST_BLOCK_SIZE (65536 / sizeof(struct esync)) +-#define ESYNC_LIST_ENTRIES 256 +- +-static struct esync *esync_list[ESYNC_LIST_ENTRIES]; +-static struct esync esync_list_initial_block[ESYNC_LIST_BLOCK_SIZE]; +- +-static inline UINT_PTR handle_to_index( HANDLE handle, UINT_PTR *entry ) +-{ +- UINT_PTR idx = (((UINT_PTR)handle) >> 2) - 1; +- *entry = idx / ESYNC_LIST_BLOCK_SIZE; +- return idx % ESYNC_LIST_BLOCK_SIZE; +-} +- +-static struct esync *add_to_list( HANDLE handle, enum esync_type type, int fd, void *shm ) +-{ +- UINT_PTR entry, idx = handle_to_index( handle, &entry ); +- +- if (entry >= ESYNC_LIST_ENTRIES) +- { +- FIXME( "too many allocated handles, not caching %p\n", handle ); +- return FALSE; +- } +- +- if (!esync_list[entry]) /* do we need to allocate a new block of entries? */ +- { +- if (!entry) esync_list[0] = esync_list_initial_block; +- else +- { +- void *ptr = anon_mmap_alloc( ESYNC_LIST_BLOCK_SIZE * sizeof(struct esync), +- PROT_READ | PROT_WRITE ); +- if (ptr == MAP_FAILED) return FALSE; +- esync_list[entry] = ptr; +- } +- } +- +- if (!InterlockedCompareExchange( (int *)&esync_list[entry][idx].type, type, 0 )) +- { +- esync_list[entry][idx].fd = fd; +- esync_list[entry][idx].shm = shm; +- } +- return &esync_list[entry][idx]; +-} +- +-static struct esync *get_cached_object( HANDLE handle ) +-{ +- UINT_PTR entry, idx = handle_to_index( handle, &entry ); +- +- if (entry >= ESYNC_LIST_ENTRIES || !esync_list[entry]) return NULL; +- if (!esync_list[entry][idx].type) return NULL; +- +- return &esync_list[entry][idx]; +-} +- +-/* Gets an object. This is either a proper esync object (i.e. an event, +- * semaphore, etc. created using create_esync) or a generic synchronizable +- * server-side object which the server will signal (e.g. a process, thread, +- * message queue, etc.) */ +-static NTSTATUS get_object( HANDLE handle, struct esync **obj ) +-{ +- NTSTATUS ret = STATUS_SUCCESS; +- enum esync_type type = 0; +- unsigned int shm_idx = 0; +- obj_handle_t fd_handle; +- sigset_t sigset; +- int fd = -1; +- +- if ((*obj = get_cached_object( handle ))) return STATUS_SUCCESS; +- +- if ((INT_PTR)handle < 0) +- { +- /* We can deal with pseudo-handles, but it's just easier this way */ +- return STATUS_NOT_IMPLEMENTED; +- } +- +- if (!handle) +- { +- /* Shadow of the Tomb Raider really likes passing in NULL handles to +- * various functions. Concerning, but let's avoid a server call. */ +- return STATUS_INVALID_HANDLE; +- } +- +- /* We need to try grabbing it from the server. */ +- server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); +- if (!(*obj = get_cached_object( handle ))) +- { +- SERVER_START_REQ( get_esync_fd ) +- { +- req->handle = wine_server_obj_handle( handle ); +- if (!(ret = wine_server_call( req ))) +- { +- type = reply->type; +- shm_idx = reply->shm_idx; +- fd = receive_fd( &fd_handle ); +- assert( wine_server_ptr_handle(fd_handle) == handle ); +- } +- } +- SERVER_END_REQ; +- } +- server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); +- +- if (*obj) +- { +- /* We managed to grab it while in the CS; return it. */ +- return STATUS_SUCCESS; +- } +- +- if (ret) +- { +- WARN("Failed to retrieve fd for handle %p, status %#x.\n", handle, ret); +- *obj = NULL; +- return ret; +- } +- +- TRACE("Got fd %d for handle %p.\n", fd, handle); +- +- *obj = add_to_list( handle, type, fd, shm_idx ? 
get_shm( shm_idx ) : 0 ); +- return ret; +-} +- +-NTSTATUS esync_close( HANDLE handle ) +-{ +- UINT_PTR entry, idx = handle_to_index( handle, &entry ); +- +- TRACE("%p.\n", handle); +- +- if (entry < ESYNC_LIST_ENTRIES && esync_list[entry]) +- { +- if (InterlockedExchange((int *)&esync_list[entry][idx].type, 0)) +- { +- close( esync_list[entry][idx].fd ); +- return STATUS_SUCCESS; +- } +- } +- +- return STATUS_INVALID_HANDLE; +-} +- +-static NTSTATUS create_esync( enum esync_type type, HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr, int initval, int max ) +-{ +- NTSTATUS ret; +- data_size_t len; +- struct object_attributes *objattr; +- obj_handle_t fd_handle; +- unsigned int shm_idx; +- sigset_t sigset; +- int fd; +- +- if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; +- +- /* We have to synchronize on the fd cache CS so that our calls to +- * receive_fd don't race with theirs. */ +- server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); +- SERVER_START_REQ( create_esync ) +- { +- req->access = access; +- req->initval = initval; +- req->type = type; +- req->max = max; +- wine_server_add_data( req, objattr, len ); +- ret = wine_server_call( req ); +- if (!ret || ret == STATUS_OBJECT_NAME_EXISTS) +- { +- *handle = wine_server_ptr_handle( reply->handle ); +- type = reply->type; +- shm_idx = reply->shm_idx; +- fd = receive_fd( &fd_handle ); +- assert( wine_server_ptr_handle(fd_handle) == *handle ); +- } +- } +- SERVER_END_REQ; +- server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); +- +- if (!ret || ret == STATUS_OBJECT_NAME_EXISTS) +- { +- add_to_list( *handle, type, fd, shm_idx ? get_shm( shm_idx ) : 0 ); +- TRACE("-> handle %p, fd %d.\n", *handle, fd); +- } +- +- free( objattr ); +- return ret; +-} +- +-static NTSTATUS open_esync( enum esync_type type, HANDLE *handle, +- ACCESS_MASK access, const OBJECT_ATTRIBUTES *attr ) +-{ +- NTSTATUS ret; +- obj_handle_t fd_handle; +- unsigned int shm_idx; +- sigset_t sigset; +- int fd; +- +- server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); +- SERVER_START_REQ( open_esync ) +- { +- req->access = access; +- req->attributes = attr->Attributes; +- req->rootdir = wine_server_obj_handle( attr->RootDirectory ); +- req->type = type; +- if (attr->ObjectName) +- wine_server_add_data( req, attr->ObjectName->Buffer, attr->ObjectName->Length ); +- if (!(ret = wine_server_call( req ))) +- { +- *handle = wine_server_ptr_handle( reply->handle ); +- type = reply->type; +- shm_idx = reply->shm_idx; +- fd = receive_fd( &fd_handle ); +- assert( wine_server_ptr_handle(fd_handle) == *handle ); +- } +- } +- SERVER_END_REQ; +- server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); +- +- if (!ret) +- { +- add_to_list( *handle, type, fd, shm_idx ? get_shm( shm_idx ) : 0 ); +- +- TRACE("-> handle %p, fd %d.\n", *handle, fd); +- } +- return ret; +-} +- +-extern NTSTATUS esync_create_semaphore(HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr, LONG initial, LONG max) +-{ +- TRACE("name %s, initial %d, max %d.\n", +- attr ? 
debugstr_us(attr->ObjectName) : "", initial, max); +- +- return create_esync( ESYNC_SEMAPHORE, handle, access, attr, initial, max ); +-} +- +-NTSTATUS esync_open_semaphore( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr ) +-{ +- TRACE("name %s.\n", debugstr_us(attr->ObjectName)); +- +- return open_esync( ESYNC_SEMAPHORE, handle, access, attr ); +-} +- +-NTSTATUS esync_release_semaphore( HANDLE handle, ULONG count, ULONG *prev ) +-{ +- struct esync *obj; +- struct semaphore *semaphore; +- uint64_t count64 = count; +- ULONG current; +- NTSTATUS ret; +- +- TRACE("%p, %d, %p.\n", handle, count, prev); +- +- if ((ret = get_object( handle, &obj))) return ret; +- semaphore = obj->shm; +- +- do +- { +- current = semaphore->count; +- +- if (count + current > semaphore->max) +- return STATUS_SEMAPHORE_LIMIT_EXCEEDED; +- } while (InterlockedCompareExchange( &semaphore->count, count + current, current ) != current); +- +- if (prev) *prev = current; +- +- /* We don't have to worry about a race between increasing the count and +- * write(). The fact that we were able to increase the count means that we +- * have permission to actually write that many releases to the semaphore. */ +- +- if (write( obj->fd, &count64, sizeof(count64) ) == -1) +- return errno_to_status( errno ); +- +- return STATUS_SUCCESS; +-} +- +-NTSTATUS esync_query_semaphore( HANDLE handle, void *info, ULONG *ret_len ) +-{ +- struct esync *obj; +- struct semaphore *semaphore; +- SEMAPHORE_BASIC_INFORMATION *out = info; +- NTSTATUS ret; +- +- TRACE("handle %p, info %p, ret_len %p.\n", handle, info, ret_len); +- +- if ((ret = get_object( handle, &obj ))) return ret; +- semaphore = obj->shm; +- +- out->CurrentCount = semaphore->count; +- out->MaximumCount = semaphore->max; +- if (ret_len) *ret_len = sizeof(*out); +- +- return STATUS_SUCCESS; +-} +- +-NTSTATUS esync_create_event( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr, EVENT_TYPE event_type, BOOLEAN initial ) +-{ +- enum esync_type type = (event_type == SynchronizationEvent ? ESYNC_AUTO_EVENT : ESYNC_MANUAL_EVENT); +- +- TRACE("name %s, %s-reset, initial %d.\n", +- attr ? debugstr_us(attr->ObjectName) : "", +- event_type == NotificationEvent ? "manual" : "auto", initial); +- +- return create_esync( type, handle, access, attr, initial, 0 ); +-} +- +-NTSTATUS esync_open_event( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr ) +-{ +- TRACE("name %s.\n", debugstr_us(attr->ObjectName)); +- +- return open_esync( ESYNC_AUTO_EVENT, handle, access, attr ); /* doesn't matter which */ +-} +- +-static inline void small_pause(void) +-{ +-#ifdef __i386__ +- __asm__ __volatile__( "rep;nop" : : : "memory" ); +-#else +- __asm__ __volatile__( "" : : : "memory" ); +-#endif +-} +- +-/* Manual-reset events are actually racier than other objects in terms of shm +- * state. With other objects, races don't matter, because we only treat the shm +- * state as a hint that lets us skip poll()—we still have to read(). But with +- * manual-reset events we don't, which means that the shm state can be out of +- * sync with the actual state. +- * +- * In general we shouldn't have to worry about races between modifying the +- * event and waiting on it. If the state changes while we're waiting, it's +- * equally plausible that we caught it before or after the state changed. +- * However, we can have races between SetEvent() and ResetEvent(), so that the +- * event has inconsistent internal state. 
+- * +- * To solve this we have to use the other field to lock the event. Currently +- * this is implemented as a spinlock, but I'm not sure if a futex might be +- * better. I'm also not sure if it's possible to obviate locking by arranging +- * writes and reads in a certain way. +- * +- * Note that we don't have to worry about locking in esync_wait_objects(). +- * There's only two general patterns: +- * +- * WaitFor() SetEvent() +- * ------------------------- +- * read() +- * signaled = 0 +- * signaled = 1 +- * write() +- * ------------------------- +- * read() +- * signaled = 1 +- * signaled = 0 +- * +- * ------------------------- +- * +- * That is, if SetEvent() tries to signal the event before WaitFor() resets its +- * signaled state, it won't bother trying to write(), and then the signaled +- * state will be reset, so the result is a consistent non-signaled event. +- * There's several variations to this pattern but all of them are protected in +- * the same way. Note however this is why we have to use interlocked_xchg() +- * event inside of the lock. +- */ +- +-/* Removing this spinlock is harder than it looks. esync_wait_objects() can +- * deal with inconsistent state well enough, and a race between SetEvent() and +- * ResetEvent() gives us license to yield either result as long as we act +- * consistently, but that's not enough. Notably, esync_wait_objects() should +- * probably act like a fence, so that the second half of esync_set_event() does +- * not seep past a subsequent reset. That's one problem, but no guarantee there +- * aren't others. */ +- +-NTSTATUS esync_set_event( HANDLE handle ) +-{ +- static const uint64_t value = 1; +- struct esync *obj; +- struct event *event; +- NTSTATUS ret; +- +- TRACE("%p.\n", handle); +- +- if ((ret = get_object( handle, &obj ))) return ret; +- event = obj->shm; +- +- if (obj->type == ESYNC_MANUAL_EVENT) +- { +- /* Acquire the spinlock. */ +- while (InterlockedCompareExchange( &event->locked, 1, 0 )) +- small_pause(); +- } +- +- /* For manual-reset events, as long as we're in a lock, we can take the +- * optimization of only calling write() if the event wasn't already +- * signaled. +- * +- * For auto-reset events, esync_wait_objects() must grab the kernel object. +- * Thus if we got into a race so that the shm state is signaled but the +- * eventfd is unsignaled (i.e. reset shm, set shm, set fd, reset fd), we +- * *must* signal the fd now, or any waiting threads will never wake up. */ +- +- if (!InterlockedExchange( &event->signaled, 1 ) || obj->type == ESYNC_AUTO_EVENT) +- { +- if (write( obj->fd, &value, sizeof(value) ) == -1) +- ERR("write: %s\n", strerror(errno)); +- } +- +- if (obj->type == ESYNC_MANUAL_EVENT) +- { +- /* Release the spinlock. */ +- event->locked = 0; +- } +- +- return STATUS_SUCCESS; +-} +- +-NTSTATUS esync_reset_event( HANDLE handle ) +-{ +- uint64_t value; +- struct esync *obj; +- struct event *event; +- NTSTATUS ret; +- +- TRACE("%p.\n", handle); +- +- if ((ret = get_object( handle, &obj ))) return ret; +- event = obj->shm; +- +- if (obj->type == ESYNC_MANUAL_EVENT) +- { +- /* Acquire the spinlock. */ +- while (InterlockedCompareExchange( &event->locked, 1, 0 )) +- small_pause(); +- } +- +- /* For manual-reset events, as long as we're in a lock, we can take the +- * optimization of only calling read() if the event was already signaled. +- * +- * For auto-reset events, we have no guarantee that the previous "signaled" +- * state is actually correct. 
We need to leave both states unsignaled after +- * leaving this function, so we always have to read(). */ +- if (InterlockedExchange( &event->signaled, 0 ) || obj->type == ESYNC_AUTO_EVENT) +- { +- if (read( obj->fd, &value, sizeof(value) ) == -1 && errno != EWOULDBLOCK && errno != EAGAIN) +- { +- ERR("read: %s\n", strerror(errno)); +- } +- } +- +- if (obj->type == ESYNC_MANUAL_EVENT) +- { +- /* Release the spinlock. */ +- event->locked = 0; +- } +- +- return STATUS_SUCCESS; +-} +- +-NTSTATUS esync_pulse_event( HANDLE handle ) +-{ +- uint64_t value = 1; +- struct esync *obj; +- NTSTATUS ret; +- +- TRACE("%p.\n", handle); +- +- if ((ret = get_object( handle, &obj ))) return ret; +- +- /* This isn't really correct; an application could miss the write. +- * Unfortunately we can't really do much better. Fortunately this is rarely +- * used (and publicly deprecated). */ +- if (write( obj->fd, &value, sizeof(value) ) == -1) +- return errno_to_status( errno ); +- +- /* Try to give other threads a chance to wake up. Hopefully erring on this +- * side is the better thing to do... */ +- NtYieldExecution(); +- +- read( obj->fd, &value, sizeof(value) ); +- +- return STATUS_SUCCESS; +-} +- +-NTSTATUS esync_query_event( HANDLE handle, void *info, ULONG *ret_len ) +-{ +- struct esync *obj; +- EVENT_BASIC_INFORMATION *out = info; +- struct pollfd fd; +- NTSTATUS ret; +- +- TRACE("handle %p, info %p, ret_len %p.\n", handle, info, ret_len); +- +- if ((ret = get_object( handle, &obj ))) return ret; +- +- fd.fd = obj->fd; +- fd.events = POLLIN; +- out->EventState = poll( &fd, 1, 0 ); +- out->EventType = (obj->type == ESYNC_AUTO_EVENT ? SynchronizationEvent : NotificationEvent); +- if (ret_len) *ret_len = sizeof(*out); +- +- return STATUS_SUCCESS; +-} +- +-NTSTATUS esync_create_mutex( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr, BOOLEAN initial ) +-{ +- TRACE("name %s, initial %d.\n", +- attr ? debugstr_us(attr->ObjectName) : "", initial); +- +- return create_esync( ESYNC_MUTEX, handle, access, attr, initial ? 0 : 1, 0 ); +-} +- +-NTSTATUS esync_open_mutex( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr ) +-{ +- TRACE("name %s.\n", debugstr_us(attr->ObjectName)); +- +- return open_esync( ESYNC_MUTEX, handle, access, attr ); +-} +- +-NTSTATUS esync_release_mutex( HANDLE *handle, LONG *prev ) +-{ +- struct esync *obj; +- struct mutex *mutex; +- static const uint64_t value = 1; +- NTSTATUS ret; +- +- TRACE("%p, %p.\n", handle, prev); +- +- if ((ret = get_object( handle, &obj ))) return ret; +- mutex = obj->shm; +- +- /* This is thread-safe, because the only thread that can change the tid to +- * or from our tid is ours. */ +- if (mutex->tid != GetCurrentThreadId()) return STATUS_MUTANT_NOT_OWNED; +- +- if (prev) *prev = mutex->count; +- +- mutex->count--; +- +- if (!mutex->count) +- { +- /* This is also thread-safe, as long as signaling the file is the last +- * thing we do. Other threads don't care about the tid if it isn't +- * theirs. 
*/ +- mutex->tid = 0; +- +- if (write( obj->fd, &value, sizeof(value) ) == -1) +- return errno_to_status( errno ); +- } +- +- return STATUS_SUCCESS; +-} +- +-NTSTATUS esync_query_mutex( HANDLE handle, void *info, ULONG *ret_len ) +-{ +- struct esync *obj; +- struct mutex *mutex; +- MUTANT_BASIC_INFORMATION *out = info; +- NTSTATUS ret; +- +- TRACE("handle %p, info %p, ret_len %p.\n", handle, info, ret_len); +- +- if ((ret = get_object( handle, &obj ))) return ret; +- mutex = obj->shm; +- +- out->CurrentCount = 1 - mutex->count; +- out->OwnedByCaller = (mutex->tid == GetCurrentThreadId()); +- out->AbandonedState = (mutex->tid == ~0); +- if (ret_len) *ret_len = sizeof(*out); +- +- return STATUS_SUCCESS; +-} +- +-#define TICKSPERSEC 10000000 +-#define TICKSPERMSEC 10000 +- +-static LONGLONG update_timeout( ULONGLONG end ) +-{ +- LARGE_INTEGER now; +- LONGLONG timeleft; +- +- NtQuerySystemTime( &now ); +- timeleft = end - now.QuadPart; +- if (timeleft < 0) timeleft = 0; +- return timeleft; +-} +- +-static int do_poll( struct pollfd *fds, nfds_t nfds, ULONGLONG *end ) +-{ +- int ret; +- +- do +- { +- if (end) +- { +- LONGLONG timeleft = update_timeout( *end ); +- +-#ifdef HAVE_PPOLL +- /* We use ppoll() if available since the time granularity is better. */ +- struct timespec tmo_p; +- tmo_p.tv_sec = timeleft / (ULONGLONG)TICKSPERSEC; +- tmo_p.tv_nsec = (timeleft % TICKSPERSEC) * 100; +- ret = ppoll( fds, nfds, &tmo_p, NULL ); +-#else +- ret = poll( fds, nfds, timeleft / TICKSPERMSEC ); +-#endif +- } +- else +- ret = poll( fds, nfds, -1 ); +- +- /* If we receive EINTR we were probably suspended (SIGUSR1), possibly for a +- * system APC. The right thing to do is just try again. */ +- } while (ret < 0 && errno == EINTR); +- +- return ret; +-} +- +-/* Return TRUE if abandoned. */ +-static BOOL update_grabbed_object( struct esync *obj ) +-{ +- BOOL ret = FALSE; +- +- if (obj->type == ESYNC_MUTEX) +- { +- struct mutex *mutex = obj->shm; +- /* We don't have to worry about a race between this and read(); the +- * fact that we grabbed it means the count is now zero, so nobody else +- * can (and the only thread that can release it is us). */ +- if (mutex->tid == ~0) +- ret = TRUE; +- mutex->tid = GetCurrentThreadId(); +- mutex->count++; +- } +- else if (obj->type == ESYNC_SEMAPHORE) +- { +- struct semaphore *semaphore = obj->shm; +- /* We don't have to worry about a race between this and read(); the +- * fact that we were able to grab it at all means the count is nonzero, +- * and if someone else grabbed it then the count must have been >= 2, +- * etc. */ +- InterlockedExchangeAdd( &semaphore->count, -1 ); +- } +- else if (obj->type == ESYNC_AUTO_EVENT) +- { +- struct event *event = obj->shm; +- /* We don't have to worry about a race between this and read(), since +- * this is just a hint, and the real state is in the kernel object. +- * This might already be 0, but that's okay! */ +- event->signaled = 0; +- } +- +- return ret; +-} +- +-/* A value of STATUS_NOT_IMPLEMENTED returned from this function means that we +- * need to delegate to server_select(). 
*/ +-static NTSTATUS __esync_wait_objects( DWORD count, const HANDLE *handles, BOOLEAN wait_any, +- BOOLEAN alertable, const LARGE_INTEGER *timeout ) +-{ +- static const LARGE_INTEGER zero; +- +- struct esync *objs[MAXIMUM_WAIT_OBJECTS]; +- struct pollfd fds[MAXIMUM_WAIT_OBJECTS + 1]; +- int has_esync = 0, has_server = 0; +- BOOL msgwait = FALSE; +- LONGLONG timeleft; +- LARGE_INTEGER now; +- DWORD pollcount; +- ULONGLONG end; +- int64_t value; +- ssize_t size; +- int i, j, ret; +- +- /* Grab the APC fd if we don't already have it. */ +- if (alertable && ntdll_get_thread_data()->esync_apc_fd == -1) +- { +- obj_handle_t fd_handle; +- sigset_t sigset; +- int fd = -1; +- +- server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); +- SERVER_START_REQ( get_esync_apc_fd ) +- { +- if (!(ret = wine_server_call( req ))) +- { +- fd = receive_fd( &fd_handle ); +- assert( fd_handle == GetCurrentThreadId() ); +- } +- } +- SERVER_END_REQ; +- server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); +- +- ntdll_get_thread_data()->esync_apc_fd = fd; +- } +- +- NtQuerySystemTime( &now ); +- if (timeout) +- { +- if (timeout->QuadPart == TIMEOUT_INFINITE) +- timeout = NULL; +- else if (timeout->QuadPart >= 0) +- end = timeout->QuadPart; +- else +- end = now.QuadPart - timeout->QuadPart; +- } +- +- for (i = 0; i < count; i++) +- { +- ret = get_object( handles[i], &objs[i] ); +- if (ret == STATUS_SUCCESS) +- has_esync = 1; +- else if (ret == STATUS_NOT_IMPLEMENTED) +- has_server = 1; +- else +- return ret; +- } +- +- if (objs[count - 1] && objs[count - 1]->type == ESYNC_QUEUE) +- msgwait = TRUE; +- +- if (has_esync && has_server) +- FIXME("Can't wait on esync and server objects at the same time!\n"); +- else if (has_server) +- return STATUS_NOT_IMPLEMENTED; +- +- if (TRACE_ON(esync)) +- { +- TRACE("Waiting for %s of %d handles:", wait_any ? "any" : "all", count); +- for (i = 0; i < count; i++) +- TRACE(" %p", handles[i]); +- +- if (msgwait) +- TRACE(" or driver events"); +- if (alertable) +- TRACE(", alertable"); +- +- if (!timeout) +- TRACE(", timeout = INFINITE.\n"); +- else +- { +- timeleft = update_timeout( end ); +- TRACE(", timeout = %ld.%07ld sec.\n", +- (long) timeleft / TICKSPERSEC, (long) timeleft % TICKSPERSEC); +- } +- } +- +- if (wait_any || count == 1) +- { +- /* Try to check objects now, so we can obviate poll() at least. 
*/ +- for (i = 0; i < count; i++) +- { +- struct esync *obj = objs[i]; +- +- if (obj) +- { +- switch (obj->type) +- { +- case ESYNC_MUTEX: +- { +- struct mutex *mutex = obj->shm; +- +- if (mutex->tid == GetCurrentThreadId()) +- { +- TRACE("Woken up by handle %p [%d].\n", handles[i], i); +- mutex->count++; +- return i; +- } +- else if (!mutex->count) +- { +- if ((size = read( obj->fd, &value, sizeof(value) )) == sizeof(value)) +- { +- if (mutex->tid == ~0) +- { +- TRACE("Woken up by abandoned mutex %p [%d].\n", handles[i], i); +- i += STATUS_ABANDONED_WAIT_0; +- } +- else +- TRACE("Woken up by handle %p [%d].\n", handles[i], i); +- mutex->tid = GetCurrentThreadId(); +- mutex->count++; +- return i; +- } +- } +- break; +- } +- case ESYNC_SEMAPHORE: +- { +- struct semaphore *semaphore = obj->shm; +- +- if (semaphore->count) +- { +- if ((size = read( obj->fd, &value, sizeof(value) )) == sizeof(value)) +- { +- TRACE("Woken up by handle %p [%d].\n", handles[i], i); +- InterlockedDecrement( &semaphore->count ); +- return i; +- } +- } +- break; +- } +- case ESYNC_AUTO_EVENT: +- { +- struct event *event = obj->shm; +- +- if (event->signaled) +- { +- if ((size = read( obj->fd, &value, sizeof(value) )) == sizeof(value)) +- { +- TRACE("Woken up by handle %p [%d].\n", handles[i], i); +- event->signaled = 0; +- return i; +- } +- } +- break; +- } +- case ESYNC_MANUAL_EVENT: +- { +- struct event *event = obj->shm; +- +- if (event->signaled) +- { +- TRACE("Woken up by handle %p [%d].\n", handles[i], i); +- return i; +- } +- break; +- } +- case ESYNC_AUTO_SERVER: +- case ESYNC_MANUAL_SERVER: +- case ESYNC_QUEUE: +- /* We can't wait on any of these. Fortunately I don't think +- * they'll ever be uncontended anyway (at least, they won't be +- * performance-critical). */ +- break; +- } +- } +- +- fds[i].fd = obj ? obj->fd : -1; +- fds[i].events = POLLIN; +- } +- if (alertable) +- { +- fds[i].fd = ntdll_get_thread_data()->esync_apc_fd; +- fds[i].events = POLLIN; +- i++; +- } +- pollcount = i; +- +- while (1) +- { +- ret = do_poll( fds, pollcount, timeout ? &end : NULL ); +- if (ret > 0) +- { +- /* We must check this first! The server may set an event that +- * we're waiting on, but we need to return STATUS_USER_APC. */ +- if (alertable) +- { +- if (fds[pollcount - 1].revents & POLLIN) +- goto userapc; +- } +- +- /* Find out which object triggered the wait. */ +- for (i = 0; i < count; i++) +- { +- struct esync *obj = objs[i]; +- +- if (fds[i].revents & (POLLERR | POLLHUP | POLLNVAL)) +- { +- ERR("Polling on fd %d returned %#x.\n", fds[i].fd, fds[i].revents); +- return STATUS_INVALID_HANDLE; +- } +- +- if (obj) +- { +- if (obj->type == ESYNC_MANUAL_EVENT +- || obj->type == ESYNC_MANUAL_SERVER +- || obj->type == ESYNC_QUEUE) +- { +- /* Don't grab the object, just check if it's signaled. */ +- if (fds[i].revents & POLLIN) +- { +- TRACE("Woken up by handle %p [%d].\n", handles[i], i); +- return i; +- } +- } +- else +- { +- if ((size = read( fds[i].fd, &value, sizeof(value) )) == sizeof(value)) +- { +- /* We found our object. */ +- TRACE("Woken up by handle %p [%d].\n", handles[i], i); +- if (update_grabbed_object( obj )) +- return STATUS_ABANDONED_WAIT_0 + i; +- return i; +- } +- } +- } +- } +- +- /* If we got here, someone else stole (or reset, etc.) whatever +- * we were waiting for. So keep waiting. */ +- NtQuerySystemTime( &now ); +- } +- else +- goto err; +- } +- } +- else +- { +- /* Wait-all is a little trickier to implement correctly. Fortunately, +- * it's not as common. 
+-         *
+-         * The idea is basically just to wait in sequence on every object in the
+-         * set. Then when we're done, try to grab them all in a tight loop. If
+-         * that fails, release any resources we've grabbed (and yes, we can
+-         * reliably do this—it's just mutexes and semaphores that we have to
+-         * put back, and in both cases we just put back 1), and if any of that
+-         * fails we start over.
+-         *
+-         * What makes this inherently bad is that we might temporarily grab a
+-         * resource incorrectly. Hopefully it'll be quick (and hey, it won't
+-         * block on wineserver) so nobody will notice. Besides, consider: if
+-         * object A becomes signaled but someone grabs it before we can grab it
+-         * and everything else, then they could just as well have grabbed it
+-         * before it became signaled. Similarly if object A was signaled and we
+-         * were blocking on object B, then B becomes available and someone grabs
+-         * A before we can, then they might have grabbed A before B became
+-         * signaled. In either case anyone who tries to wait on A or B will be
+-         * waiting for an instant while we put things back. */
+-
+-        while (1)
+-        {
+-tryagain:
+-            /* First step: try to poll on each object in sequence. */
+-            fds[0].events = POLLIN;
+-            pollcount = 1;
+-            if (alertable)
+-            {
+-                /* We also need to wait on APCs. */
+-                fds[1].fd = ntdll_get_thread_data()->esync_apc_fd;
+-                fds[1].events = POLLIN;
+-                pollcount++;
+-            }
+-            for (i = 0; i < count; i++)
+-            {
+-                struct esync *obj = objs[i];
+-
+-                fds[0].fd = obj ? obj->fd : -1;
+-
+-                if (obj && obj->type == ESYNC_MUTEX)
+-                {
+-                    /* It might be ours. */
+-                    struct mutex *mutex = obj->shm;
+-
+-                    if (mutex->tid == GetCurrentThreadId())
+-                        continue;
+-                }
+-
+-                ret = do_poll( fds, pollcount, timeout ? &end : NULL );
+-                if (ret <= 0)
+-                    goto err;
+-                else if (alertable && (fds[1].revents & POLLIN))
+-                    goto userapc;
+-
+-                if (fds[0].revents & (POLLHUP | POLLERR | POLLNVAL))
+-                {
+-                    ERR("Polling on fd %d returned %#x.\n", fds[0].fd, fds[0].revents);
+-                    return STATUS_INVALID_HANDLE;
+-                }
+-            }
+-
+-            /* If we got here and we haven't timed out, that means all of the
+-             * handles were signaled. Check to make sure they still are. */
+-            for (i = 0; i < count; i++)
+-            {
+-                fds[i].fd = objs[i] ? objs[i]->fd : -1;
+-                fds[i].events = POLLIN;
+-            }
+-            /* There's no reason to check for APCs here. */
+-            pollcount = i;
+-
+-            /* Poll everything to see if they're still signaled. */
+-            ret = poll( fds, pollcount, 0 );
+-            if (ret == pollcount)
+-            {
+-                BOOL abandoned = FALSE;
+-
+-                /* Quick, grab everything. */
+-                for (i = 0; i < count; i++)
+-                {
+-                    struct esync *obj = objs[i];
+-
+-                    switch (obj->type)
+-                    {
+-                    case ESYNC_MUTEX:
+-                    {
+-                        struct mutex *mutex = obj->shm;
+-                        if (mutex->tid == GetCurrentThreadId())
+-                            break;
+-                        /* otherwise fall through */
+-                    }
+-                    case ESYNC_SEMAPHORE:
+-                    case ESYNC_AUTO_EVENT:
+-                        if ((size = read( fds[i].fd, &value, sizeof(value) )) != sizeof(value))
+-                        {
+-                            /* We were too slow. Put everything back. */
+-                            value = 1;
+-                            for (j = i; j >= 0; j--)
+-                            {
+-                                if (write( obj->fd, &value, sizeof(value) ) == -1)
+-                                    return errno_to_status( errno );
+-                            }
+-
+-                            goto tryagain; /* break out of two loops and a switch */
+-                        }
+-                        break;
+-                    default:
+-                        /* If a manual-reset event changed between there and
+-                         * here, it shouldn't be a problem. */
+-                        break;
+-                    }
+-                }
+-
+-                /* If we got here, we successfully waited on every object. */
+-                /* Make sure to let ourselves know that we grabbed the mutexes
+-                 * and semaphores.
*/ +- for (i = 0; i < count; i++) +- abandoned |= update_grabbed_object( objs[i] ); +- +- if (abandoned) +- { +- TRACE("Wait successful, but some object(s) were abandoned.\n"); +- return STATUS_ABANDONED; +- } +- TRACE("Wait successful.\n"); +- return STATUS_SUCCESS; +- } +- +- /* If we got here, ppoll() returned less than all of our objects. +- * So loop back to the beginning and try again. */ +- } /* while(1) */ +- } /* else (wait-all) */ +- +-err: +- /* We should only get here if poll() failed. */ +- +- if (ret == 0) +- { +- TRACE("Wait timed out.\n"); +- return STATUS_TIMEOUT; +- } +- else +- { +- ERR("ppoll failed: %s\n", strerror(errno)); +- return errno_to_status( errno ); +- } +- +-userapc: +- TRACE("Woken up by user APC.\n"); +- +- /* We have to make a server call anyway to get the APC to execute, so just +- * delegate down to server_select(). */ +- ret = server_wait( NULL, 0, SELECT_INTERRUPTIBLE | SELECT_ALERTABLE, &zero ); +- +- /* This can happen if we received a system APC, and the APC fd was woken up +- * before we got SIGUSR1. poll() doesn't return EINTR in that case. The +- * right thing to do seems to be to return STATUS_USER_APC anyway. */ +- if (ret == STATUS_TIMEOUT) ret = STATUS_USER_APC; +- return ret; +-} +- +-/* We need to let the server know when we are doing a message wait, and when we +- * are done with one, so that all of the code surrounding hung queues works. +- * We also need this for WaitForInputIdle(). */ +-static void server_set_msgwait( int in_msgwait ) +-{ +- SERVER_START_REQ( esync_msgwait ) +- { +- req->in_msgwait = in_msgwait; +- wine_server_call( req ); +- } +- SERVER_END_REQ; +-} +- +-/* This is a very thin wrapper around the proper implementation above. The +- * purpose is to make sure the server knows when we are doing a message wait. +- * This is separated into a wrapper function since there are at least a dozen +- * exit paths from esync_wait_objects(). 
*/ +-NTSTATUS esync_wait_objects( DWORD count, const HANDLE *handles, BOOLEAN wait_any, +- BOOLEAN alertable, const LARGE_INTEGER *timeout ) +-{ +- BOOL msgwait = FALSE; +- struct esync *obj; +- NTSTATUS ret; +- +- if (count && !get_object( handles[count - 1], &obj ) && obj->type == ESYNC_QUEUE) +- { +- msgwait = TRUE; +- server_set_msgwait( 1 ); +- } +- +- ret = __esync_wait_objects( count, handles, wait_any, alertable, timeout ); +- +- if (msgwait) +- server_set_msgwait( 0 ); +- +- return ret; +-} +- +-NTSTATUS esync_signal_and_wait( HANDLE signal, HANDLE wait, BOOLEAN alertable, +- const LARGE_INTEGER *timeout ) +-{ +- struct esync *obj; +- NTSTATUS ret; +- +- if ((ret = get_object( signal, &obj ))) return ret; +- +- switch (obj->type) +- { +- case ESYNC_SEMAPHORE: +- ret = esync_release_semaphore( signal, 1, NULL ); +- break; +- case ESYNC_AUTO_EVENT: +- case ESYNC_MANUAL_EVENT: +- ret = esync_set_event( signal ); +- break; +- case ESYNC_MUTEX: +- ret = esync_release_mutex( signal, NULL ); +- break; +- default: +- return STATUS_OBJECT_TYPE_MISMATCH; +- } +- if (ret) return ret; +- +- return esync_wait_objects( 1, &wait, TRUE, alertable, timeout ); +-} +- +-void esync_init(void) +-{ +- struct stat st; +- +- if (!do_esync()) +- { +- /* make sure the server isn't running with WINEESYNC */ +- HANDLE handle; +- NTSTATUS ret; +- +- ret = create_esync( 0, &handle, 0, NULL, 0, 0 ); +- if (ret != STATUS_NOT_IMPLEMENTED) +- { +- ERR("Server is running with WINEESYNC but this process is not, please enable WINEESYNC or restart wineserver.\n"); +- exit(1); +- } +- +- return; +- } +- +- if (stat( config_dir, &st ) == -1) +- ERR("Cannot stat %s\n", config_dir); +- +- if (st.st_ino != (unsigned long)st.st_ino) +- sprintf( shm_name, "/wine-%lx%08lx-esync", (unsigned long)((unsigned long long)st.st_ino >> 32), (unsigned long)st.st_ino ); +- else +- sprintf( shm_name, "/wine-%lx-esync", (unsigned long)st.st_ino ); +- +- if ((shm_fd = shm_open( shm_name, O_RDWR, 0644 )) == -1) +- { +- /* probably the server isn't running with WINEESYNC, tell the user and bail */ +- if (errno == ENOENT) +- ERR("Failed to open esync shared memory file; make sure no stale wineserver instances are running without WINEESYNC.\n"); +- else +- ERR("Failed to initialize shared memory: %s\n", strerror( errno )); +- exit(1); +- } +- +- pagesize = sysconf( _SC_PAGESIZE ); +- +- shm_addrs = calloc( 128, sizeof(shm_addrs[0]) ); +- shm_addrs_size = 128; +-} +diff --git a/dlls/ntdll/unix/esync.h b/dlls/ntdll/unix/esync.h +deleted file mode 100644 +index 59f8809..0000000 +--- a/dlls/ntdll/unix/esync.h ++++ /dev/null +@@ -1,61 +0,0 @@ +-/* +- * eventfd-based synchronization objects +- * +- * Copyright (C) 2018 Zebediah Figura +- * +- * This library is free software; you can redistribute it and/or +- * modify it under the terms of the GNU Lesser General Public +- * License as published by the Free Software Foundation; either +- * version 2.1 of the License, or (at your option) any later version. +- * +- * This library is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * Lesser General Public License for more details. 
+- * +- * You should have received a copy of the GNU Lesser General Public +- * License along with this library; if not, write to the Free Software +- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA +- */ +- +-extern int do_esync(void); +-extern void esync_init(void); +-extern NTSTATUS esync_close( HANDLE handle ); +- +-extern NTSTATUS esync_create_semaphore(HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr, LONG initial, LONG max); +-extern NTSTATUS esync_open_semaphore( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr ); +-extern NTSTATUS esync_query_semaphore( HANDLE handle, void *info, ULONG *ret_len ); +-extern NTSTATUS esync_release_semaphore( HANDLE handle, ULONG count, ULONG *prev ); +- +-extern NTSTATUS esync_create_event( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr, EVENT_TYPE type, BOOLEAN initial ); +-extern NTSTATUS esync_open_event( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr ); +-extern NTSTATUS esync_pulse_event( HANDLE handle ); +-extern NTSTATUS esync_query_event( HANDLE handle, void *info, ULONG *ret_len ); +-extern NTSTATUS esync_reset_event( HANDLE handle ); +-extern NTSTATUS esync_set_event( HANDLE handle ); +- +-extern NTSTATUS esync_create_mutex( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr, BOOLEAN initial ); +-extern NTSTATUS esync_open_mutex( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr ); +-extern NTSTATUS esync_query_mutex( HANDLE handle, void *info, ULONG *ret_len ); +-extern NTSTATUS esync_release_mutex( HANDLE *handle, LONG *prev ); +- +-extern NTSTATUS esync_wait_objects( DWORD count, const HANDLE *handles, BOOLEAN wait_any, +- BOOLEAN alertable, const LARGE_INTEGER *timeout ); +-extern NTSTATUS esync_signal_and_wait( HANDLE signal, HANDLE wait, BOOLEAN alertable, +- const LARGE_INTEGER *timeout ); +- +- +-/* We have to synchronize on the fd cache mutex so that our calls to receive_fd +- * don't race with theirs. It looks weird, I know. +- * +- * If we weren't trying to avoid touching the code I'd rename the mutex to +- * "server_fd_mutex" or something similar. 
*/ +-extern pthread_mutex_t fd_cache_mutex; +- +-extern int receive_fd( obj_handle_t *handle ); +diff --git a/dlls/ntdll/unix/file.c b/dlls/ntdll/unix/file.c +index 2bfb9b7..81564d7 100644 +--- a/dlls/ntdll/unix/file.c ++++ b/dlls/ntdll/unix/file.c +@@ -7507,7 +7507,7 @@ NTSTATUS WINAPI NtLockFile( HANDLE file, HANDLE event, PIO_APC_ROUTINE apc, void + } + if (handle) + { +- NtWaitForSingleObject( handle, FALSE, NULL ); ++ server_wait_for_object( handle, FALSE, NULL ); + NtClose( handle ); + } + else /* Unix lock conflict, sleep a bit and retry */ +diff --git a/dlls/ntdll/unix/loader.c b/dlls/ntdll/unix/loader.c +index 50d9e27..f163492 100644 +--- a/dlls/ntdll/unix/loader.c ++++ b/dlls/ntdll/unix/loader.c +@@ -88,7 +88,6 @@ extern char **environ; + #include "winioctl.h" + #include "winternl.h" + #include "unix_private.h" +-#include "esync.h" + #include "wine/list.h" + #include "ntsyscalls.h" + #include "wine/debug.h" +@@ -1853,7 +1852,6 @@ static void start_main_thread(void) + signal_alloc_thread( teb ); + dbg_init(); + startup_info_size = server_init_process(); +- esync_init(); + virtual_map_user_shared_data(); + init_cpu_info(); + init_files(); +diff --git a/dlls/ntdll/unix/process.c b/dlls/ntdll/unix/process.c +index 3591a3e..b744517 100644 +--- a/dlls/ntdll/unix/process.c ++++ b/dlls/ntdll/unix/process.c +@@ -921,7 +921,7 @@ NTSTATUS WINAPI NtCreateUserProcess( HANDLE *process_handle_ptr, HANDLE *thread_ + + /* wait for the new process info to be ready */ + +- NtWaitForSingleObject( process_info, FALSE, NULL ); ++ server_wait_for_object( process_info, FALSE, NULL ); + SERVER_START_REQ( get_new_process_info ) + { + req->info = wine_server_obj_handle( process_info ); +diff --git a/dlls/ntdll/unix/server.c b/dlls/ntdll/unix/server.c +index 0d4b320..dba7645 100644 +--- a/dlls/ntdll/unix/server.c ++++ b/dlls/ntdll/unix/server.c +@@ -83,7 +83,6 @@ + #include "wine/server.h" + #include "wine/debug.h" + #include "unix_private.h" +-#include "esync.h" + #include "ddk/wdm.h" + + WINE_DEFAULT_DEBUG_CHANNEL(server); +@@ -814,6 +813,21 @@ unsigned int server_wait( const select_op_t *select_op, data_size_t size, UINT f + } + + ++/* helper function to perform a server-side wait on an internal handle without ++ * using the fast synchronization path */ ++unsigned int server_wait_for_object( HANDLE handle, BOOL alertable, const LARGE_INTEGER *timeout ) ++{ ++ select_op_t select_op; ++ UINT flags = SELECT_INTERRUPTIBLE; ++ ++ if (alertable) flags |= SELECT_ALERTABLE; ++ ++ select_op.wait.op = SELECT_WAIT; ++ select_op.wait.handles[0] = wine_server_obj_handle( handle ); ++ return server_wait( &select_op, offsetof( select_op_t, wait.handles[1] ), flags, timeout ); ++} ++ ++ + /*********************************************************************** + * NtContinue (NTDLL.@) + */ +@@ -875,7 +889,7 @@ unsigned int server_queue_process_apc( HANDLE process, const apc_call_t *call, a + } + else + { +- NtWaitForSingleObject( handle, FALSE, NULL ); ++ server_wait_for_object( handle, FALSE, NULL ); + + SERVER_START_REQ( get_apc_result ) + { +@@ -947,7 +961,7 @@ void wine_server_send_fd( int fd ) + * + * Receive a file descriptor passed from the server. 
+ */ +-int receive_fd( obj_handle_t *handle ) ++static int receive_fd( obj_handle_t *handle ) + { + struct iovec vec; + struct msghdr msghdr; +@@ -1826,12 +1840,17 @@ NTSTATUS WINAPI NtDuplicateObject( HANDLE source_process, HANDLE source, HANDLE + return result.dup_handle.status; + } + ++ /* hold fd_cache_mutex to prevent the fd from being added again between the ++ * call to remove_fd_from_cache and close_handle */ + server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); + + /* always remove the cached fd; if the server request fails we'll just + * retrieve it again */ + if (options & DUPLICATE_CLOSE_SOURCE) ++ { + fd = remove_fd_from_cache( source ); ++ close_fast_sync_obj( source ); ++ } + + SERVER_START_REQ( dup_handle ) + { +@@ -1897,14 +1916,15 @@ NTSTATUS WINAPI NtClose( HANDLE handle ) + if (HandleToLong( handle ) >= ~5 && HandleToLong( handle ) <= ~0) + return STATUS_SUCCESS; + ++ /* hold fd_cache_mutex to prevent the fd from being added again between the ++ * call to remove_fd_from_cache and close_handle */ + server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); + + /* always remove the cached fd; if the server request fails we'll just + * retrieve it again */ + fd = remove_fd_from_cache( handle ); + +- if (do_esync()) +- esync_close( handle ); ++ close_fast_sync_obj( handle ); + + SERVER_START_REQ( close_handle ) + { +diff --git a/dlls/ntdll/unix/sync.c b/dlls/ntdll/unix/sync.c +index 1072265..a9a0a2c 100644 +--- a/dlls/ntdll/unix/sync.c ++++ b/dlls/ntdll/unix/sync.c +@@ -30,9 +30,11 @@ + #include + #include + #include ++#include + #include + #include + #include ++#include + #include + #ifdef HAVE_SYS_SYSCALL_H + #include +@@ -45,6 +47,7 @@ + #endif + #include + #include ++#include + #include + #include + #include +@@ -54,6 +57,9 @@ + #ifdef HAVE_KQUEUE + # include + #endif ++#ifdef HAVE_LINUX_NTSYNC_H ++# include ++#endif + + #include "ntstatus.h" + #define WIN32_NO_STATUS +@@ -63,7 +69,6 @@ + #include "wine/server.h" + #include "wine/debug.h" + #include "unix_private.h" +-#include "esync.h" + + WINE_DEFAULT_DEBUG_CHANNEL(sync); + +@@ -72,9 +77,11 @@ HANDLE keyed_event = 0; + static const char *debugstr_timeout( const LARGE_INTEGER *timeout ) + { + if (!timeout) return "(infinite)"; +- return wine_dbgstr_longlong( timeout->QuadPart ); ++ return wine_dbg_sprintf( "%lld.%07ld", (long long)(timeout->QuadPart / TICKSPERSEC), ++ (long)(timeout->QuadPart % TICKSPERSEC) ); + } + ++ + /* return a monotonic time counter, in Win32 ticks */ + static inline ULONGLONG monotonic_counter(void) + { +@@ -239,6 +246,902 @@ static unsigned int validate_open_object_attributes( const OBJECT_ATTRIBUTES *at + } + + ++#ifdef HAVE_LINUX_NTSYNC_H ++ ++static int get_linux_sync_device(void) ++{ ++ static LONG fast_sync_fd = -2; ++ ++ if (fast_sync_fd == -2) ++ { ++ HANDLE device; ++ int fd, needs_close; ++ NTSTATUS ret; ++ ++ SERVER_START_REQ( get_linux_sync_device ) ++ { ++ if (!(ret = wine_server_call( req ))) device = wine_server_ptr_handle( reply->handle ); ++ } ++ SERVER_END_REQ; ++ ++ if (!ret) ++ { ++ if (!server_get_unix_fd( device, 0, &fd, &needs_close, NULL, NULL )) ++ { ++ if (InterlockedCompareExchange( &fast_sync_fd, fd, -2 ) != -2) ++ { ++ /* someone beat us to it */ ++ if (needs_close) close( fd ); ++ NtClose( device ); ++ } ++ /* otherwise don't close the device */ ++ } ++ else ++ { ++ InterlockedCompareExchange( &fast_sync_fd, -1, -2 ); ++ NtClose( device ); ++ } ++ } ++ else ++ { ++ InterlockedCompareExchange( &fast_sync_fd, -1, -2 ); ++ } ++ } ++ return 
fast_sync_fd;
++}
++
++/* It's possible for synchronization primitives to remain alive even after being
++ * closed, because a thread is still waiting on them. It's rare in practice, and
++ * documented as being undefined behaviour by Microsoft, but it works, and some
++ * applications rely on it. This means we need to refcount handles, and defer
++ * deleting them on the server side until the refcount reaches zero. We do this
++ * by having each client process hold a handle to the fast synchronization
++ * object, as well as a private refcount. When the client refcount reaches zero,
++ * it closes the handle; when all handles are closed, the server deletes the
++ * fast synchronization object.
++ *
++ * We also need this for signal-and-wait. The signal and wait operations aren't
++ * atomic, but we can't perform the signal and then return STATUS_INVALID_HANDLE
++ * for the wait—we need to either do both operations or neither. That means we
++ * need to grab references to both objects, and prevent them from being
++ * destroyed before we're done with them.
++ *
++ * We want lookup of objects from the cache to be very fast; ideally, it should
++ * be lock-free. We achieve this by using atomic modifications to "refcount",
++ * and guaranteeing that all other fields are valid and correct *as long as*
++ * refcount is nonzero, and we store the entire structure in memory which will
++ * never be freed.
++ *
++ * This means that acquiring the object can't use a simple atomic increment; it
++ * has to use a compare-and-swap loop to ensure that it doesn't try to increment
++ * an object with a zero refcount. That's still leagues better than a real lock,
++ * though, and release can be a single atomic decrement.
++ *
++ * It also means that threads modifying the cache need to take a lock, to
++ * prevent other threads from writing to it concurrently.
++ *
++ * It's possible for an object currently in use (by a waiter) to be closed and
++ * the same handle immediately reallocated to a different object. This should be
++ * a very rare situation, and in that case we simply don't cache the handle.
++ */ ++struct fast_sync_cache_entry ++{ ++ LONG refcount; ++ int fd; ++ enum fast_sync_type type; ++ unsigned int access; ++ BOOL closed; ++ /* handle to the underlying fast sync object, stored as obj_handle_t to save ++ * space */ ++ obj_handle_t handle; ++}; ++ ++ ++static void release_fast_sync_obj( struct fast_sync_cache_entry *cache ) ++{ ++ /* save the handle and fd now; as soon as the refcount hits 0 we cannot ++ * access the cache anymore */ ++ HANDLE handle = wine_server_ptr_handle( cache->handle ); ++ int fd = cache->fd; ++ LONG refcount = InterlockedDecrement( &cache->refcount ); ++ ++ assert( refcount >= 0 ); ++ ++ if (!refcount) ++ { ++ NTSTATUS ret; ++ ++ /* we can't call NtClose here as we may be inside fd_cache_mutex */ ++ SERVER_START_REQ( close_handle ) ++ { ++ req->handle = wine_server_obj_handle( handle ); ++ ret = wine_server_call( req ); ++ } ++ SERVER_END_REQ; ++ ++ assert( !ret ); ++ close( fd ); ++ } ++} ++ ++ ++#define FAST_SYNC_CACHE_BLOCK_SIZE (65536 / sizeof(struct fast_sync_cache_entry)) ++#define FAST_SYNC_CACHE_ENTRIES 128 ++ ++static struct fast_sync_cache_entry *fast_sync_cache[FAST_SYNC_CACHE_ENTRIES]; ++static struct fast_sync_cache_entry fast_sync_cache_initial_block[FAST_SYNC_CACHE_BLOCK_SIZE]; ++ ++static inline unsigned int fast_sync_handle_to_index( HANDLE handle, unsigned int *entry ) ++{ ++ unsigned int idx = (wine_server_obj_handle(handle) >> 2) - 1; ++ *entry = idx / FAST_SYNC_CACHE_BLOCK_SIZE; ++ return idx % FAST_SYNC_CACHE_BLOCK_SIZE; ++} ++ ++ ++static struct fast_sync_cache_entry *cache_fast_sync_obj( HANDLE handle, obj_handle_t fast_sync, int fd, ++ enum fast_sync_type type, unsigned int access ) ++{ ++ unsigned int entry, idx = fast_sync_handle_to_index( handle, &entry ); ++ struct fast_sync_cache_entry *cache; ++ sigset_t sigset; ++ int refcount; ++ ++ if (entry >= FAST_SYNC_CACHE_ENTRIES) ++ { ++ FIXME( "too many allocated handles, not caching %p\n", handle ); ++ return NULL; ++ } ++ ++ if (!fast_sync_cache[entry]) /* do we need to allocate a new block of entries? */ ++ { ++ if (!entry) fast_sync_cache[0] = fast_sync_cache_initial_block; ++ else ++ { ++ static const size_t size = FAST_SYNC_CACHE_BLOCK_SIZE * sizeof(struct fast_sync_cache_entry); ++ void *ptr = anon_mmap_alloc( size, PROT_READ | PROT_WRITE ); ++ if (ptr == MAP_FAILED) return NULL; ++ if (InterlockedCompareExchangePointer( (void **)&fast_sync_cache[entry], ptr, NULL )) ++ munmap( ptr, size ); /* someone beat us to it */ ++ } ++ } ++ ++ cache = &fast_sync_cache[entry][idx]; ++ ++ /* Hold fd_cache_mutex instead of a separate mutex, to prevent the same ++ * race between this function and NtClose. That is, prevent the object from ++ * being cached again between close_fast_sync_obj() and close_handle. */ ++ server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); ++ ++ if (InterlockedCompareExchange( &cache->refcount, 0, 0 )) ++ { ++ /* We lost the race with another thread trying to cache this object, or ++ * the handle is currently being used for another object (i.e. it was ++ * closed and then reused). We have no way of knowing which, and in the ++ * latter case we can't cache this object until the old one is ++ * completely destroyed, so always return failure. 
*/ ++ server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); ++ return NULL; ++ } ++ ++ cache->handle = fast_sync; ++ cache->fd = fd; ++ cache->type = type; ++ cache->access = access; ++ cache->closed = FALSE; ++ /* Make sure we set the other members before the refcount; this store needs ++ * release semantics [paired with the load in get_cached_fast_sync_obj()]. ++ * Set the refcount to 2 (one for the handle, one for the caller). */ ++ refcount = InterlockedExchange( &cache->refcount, 2 ); ++ assert( !refcount ); ++ ++ server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); ++ ++ return cache; ++} ++ ++ ++/* returns the previous value */ ++static inline LONG interlocked_inc_if_nonzero( LONG *dest ) ++{ ++ LONG val, tmp; ++ for (val = *dest;; val = tmp) ++ { ++ if (!val || (tmp = InterlockedCompareExchange( dest, val + 1, val )) == val) ++ break; ++ } ++ return val; ++} ++ ++ ++static struct fast_sync_cache_entry *get_cached_fast_sync_obj( HANDLE handle ) ++{ ++ unsigned int entry, idx = fast_sync_handle_to_index( handle, &entry ); ++ struct fast_sync_cache_entry *cache; ++ ++ if (entry >= FAST_SYNC_CACHE_ENTRIES || !fast_sync_cache[entry]) ++ return NULL; ++ ++ cache = &fast_sync_cache[entry][idx]; ++ ++ /* this load needs acquire semantics [paired with the store in ++ * cache_fast_sync_obj()] */ ++ if (!interlocked_inc_if_nonzero( &cache->refcount )) ++ return NULL; ++ ++ if (cache->closed) ++ { ++ /* The object is still being used, but "handle" has been closed. The ++ * handle value might have been reused for another object in the ++ * meantime, in which case we have to report that valid object, so ++ * force the caller to check the server. */ ++ release_fast_sync_obj( cache ); ++ return NULL; ++ } ++ ++ return cache; ++} ++ ++ ++static BOOL fast_sync_types_match( enum fast_sync_type a, enum fast_sync_type b ) ++{ ++ if (a == b) return TRUE; ++ if (a == FAST_SYNC_AUTO_EVENT && b == FAST_SYNC_MANUAL_EVENT) return TRUE; ++ if (b == FAST_SYNC_AUTO_EVENT && a == FAST_SYNC_MANUAL_EVENT) return TRUE; ++ return FALSE; ++} ++ ++ ++/* returns a pointer to a cache entry; if the object could not be cached, ++ * returns "stack_cache" instead, which should be allocated on stack */ ++static NTSTATUS get_fast_sync_obj( HANDLE handle, enum fast_sync_type desired_type, ACCESS_MASK desired_access, ++ struct fast_sync_cache_entry *stack_cache, ++ struct fast_sync_cache_entry **ret_cache ) ++{ ++ struct fast_sync_cache_entry *cache; ++ obj_handle_t fast_sync_handle; ++ enum fast_sync_type type; ++ unsigned int access; ++ int fd, needs_close; ++ NTSTATUS ret; ++ ++ /* try to find it in the cache already */ ++ if ((cache = get_cached_fast_sync_obj( handle ))) ++ { ++ *ret_cache = cache; ++ return STATUS_SUCCESS; ++ } ++ ++ /* try to retrieve it from the server */ ++ SERVER_START_REQ( get_linux_sync_obj ) ++ { ++ req->handle = wine_server_obj_handle( handle ); ++ if (!(ret = wine_server_call( req ))) ++ { ++ fast_sync_handle = reply->handle; ++ access = reply->access; ++ type = reply->type; ++ } ++ } ++ SERVER_END_REQ; ++ ++ if (ret) return ret; ++ ++ if ((ret = server_get_unix_fd( wine_server_ptr_handle( fast_sync_handle ), ++ 0, &fd, &needs_close, NULL, NULL ))) ++ return ret; ++ ++ cache = cache_fast_sync_obj( handle, fast_sync_handle, fd, type, access ); ++ if (!cache) ++ { ++ cache = stack_cache; ++ cache->handle = fast_sync_handle; ++ cache->fd = fd; ++ cache->type = type; ++ cache->access = access; ++ cache->closed = FALSE; ++ cache->refcount = 1; ++ } ++ ++ *ret_cache = cache; ++ ++ 
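/* validate the cached object against the type and access the caller
++     * requested; on a mismatch, the reference taken above is dropped again */
++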
if (desired_type && !fast_sync_types_match( cache->type, desired_type )) ++ { ++ release_fast_sync_obj( cache ); ++ return STATUS_OBJECT_TYPE_MISMATCH; ++ } ++ ++ if ((cache->access & desired_access) != desired_access) ++ { ++ release_fast_sync_obj( cache ); ++ return STATUS_ACCESS_DENIED; ++ } ++ ++ return STATUS_SUCCESS; ++} ++ ++ ++/* caller must hold fd_cache_mutex */ ++void close_fast_sync_obj( HANDLE handle ) ++{ ++ struct fast_sync_cache_entry *cache = get_cached_fast_sync_obj( handle ); ++ ++ if (cache) ++ { ++ cache->closed = TRUE; ++ /* once for the reference we just grabbed, and once for the handle */ ++ release_fast_sync_obj( cache ); ++ release_fast_sync_obj( cache ); ++ } ++} ++ ++ ++static NTSTATUS linux_release_semaphore_obj( int obj, ULONG count, ULONG *prev_count ) ++{ ++ NTSTATUS ret; ++ ++ ret = ioctl( obj, NTSYNC_IOC_SEM_POST, &count ); ++ if (ret < 0) ++ { ++ if (errno == EOVERFLOW) ++ return STATUS_SEMAPHORE_LIMIT_EXCEEDED; ++ else ++ return errno_to_status( errno ); ++ } ++ if (prev_count) *prev_count = count; ++ return STATUS_SUCCESS; ++} ++ ++ ++static NTSTATUS fast_release_semaphore( HANDLE handle, ULONG count, ULONG *prev_count ) ++{ ++ struct fast_sync_cache_entry stack_cache, *cache; ++ NTSTATUS ret; ++ ++ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_SEMAPHORE, ++ SEMAPHORE_MODIFY_STATE, &stack_cache, &cache ))) ++ return ret; ++ ++ ret = linux_release_semaphore_obj( cache->fd, count, prev_count ); ++ ++ release_fast_sync_obj( cache ); ++ return ret; ++} ++ ++ ++static NTSTATUS linux_query_semaphore_obj( int obj, SEMAPHORE_BASIC_INFORMATION *info ) ++{ ++ struct ntsync_sem_args args = {0}; ++ NTSTATUS ret; ++ ++ ret = ioctl( obj, NTSYNC_IOC_SEM_READ, &args ); ++ if (ret < 0) ++ return errno_to_status( errno ); ++ info->CurrentCount = args.count; ++ info->MaximumCount = args.max; ++ return STATUS_SUCCESS; ++} ++ ++ ++static NTSTATUS fast_query_semaphore( HANDLE handle, SEMAPHORE_BASIC_INFORMATION *info ) ++{ ++ struct fast_sync_cache_entry stack_cache, *cache; ++ NTSTATUS ret; ++ ++ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_SEMAPHORE, ++ SEMAPHORE_QUERY_STATE, &stack_cache, &cache ))) ++ return ret; ++ ++ ret = linux_query_semaphore_obj( cache->fd, info ); ++ ++ release_fast_sync_obj( cache ); ++ return ret; ++} ++ ++ ++static NTSTATUS linux_set_event_obj( int obj, LONG *prev_state ) ++{ ++ NTSTATUS ret; ++ __u32 prev; ++ ++ ret = ioctl( obj, NTSYNC_IOC_EVENT_SET, &prev ); ++ if (ret < 0) ++ return errno_to_status( errno ); ++ if (prev_state) *prev_state = prev; ++ return STATUS_SUCCESS; ++} ++ ++ ++static NTSTATUS fast_set_event( HANDLE handle, LONG *prev_state ) ++{ ++ struct fast_sync_cache_entry stack_cache, *cache; ++ NTSTATUS ret; ++ ++ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_AUTO_EVENT, ++ EVENT_MODIFY_STATE, &stack_cache, &cache ))) ++ return ret; ++ ++ ret = linux_set_event_obj( cache->fd, prev_state ); ++ ++ release_fast_sync_obj( cache ); ++ return ret; ++} ++ ++ ++static NTSTATUS linux_reset_event_obj( int obj, LONG *prev_state ) ++{ ++ NTSTATUS ret; ++ __u32 prev; ++ ++ ret = ioctl( obj, NTSYNC_IOC_EVENT_RESET, &prev ); ++ if (ret < 0) ++ return errno_to_status( errno ); ++ if (prev_state) *prev_state = prev; ++ return STATUS_SUCCESS; ++} ++ ++ ++static NTSTATUS fast_reset_event( HANDLE handle, LONG *prev_state ) ++{ ++ struct fast_sync_cache_entry stack_cache, *cache; ++ NTSTATUS ret; ++ ++ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_AUTO_EVENT, ++ EVENT_MODIFY_STATE, &stack_cache, &cache ))) ++ return ret; ++ ++ ret = 
linux_reset_event_obj( cache->fd, prev_state ); ++ ++ release_fast_sync_obj( cache ); ++ return ret; ++} ++ ++ ++static NTSTATUS linux_pulse_event_obj( int obj, LONG *prev_state ) ++{ ++ NTSTATUS ret; ++ __u32 prev; ++ ++ ret = ioctl( obj, NTSYNC_IOC_EVENT_PULSE, &prev ); ++ if (ret < 0) ++ return errno_to_status( errno ); ++ if (prev_state) *prev_state = prev; ++ return STATUS_SUCCESS; ++} ++ ++ ++static NTSTATUS fast_pulse_event( HANDLE handle, LONG *prev_state ) ++{ ++ struct fast_sync_cache_entry stack_cache, *cache; ++ NTSTATUS ret; ++ ++ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_AUTO_EVENT, ++ EVENT_MODIFY_STATE, &stack_cache, &cache ))) ++ return ret; ++ ++ ret = linux_pulse_event_obj( cache->fd, prev_state ); ++ ++ release_fast_sync_obj( cache ); ++ return ret; ++} ++ ++ ++static NTSTATUS linux_query_event_obj( int obj, enum fast_sync_type type, EVENT_BASIC_INFORMATION *info ) ++{ ++ struct ntsync_event_args args = {0}; ++ NTSTATUS ret; ++ ++ ret = ioctl( obj, NTSYNC_IOC_EVENT_READ, &args ); ++ if (ret < 0) ++ return errno_to_status( errno ); ++ info->EventType = (type == FAST_SYNC_AUTO_EVENT) ? SynchronizationEvent : NotificationEvent; ++ info->EventState = args.signaled; ++ return STATUS_SUCCESS; ++} ++ ++ ++static NTSTATUS fast_query_event( HANDLE handle, EVENT_BASIC_INFORMATION *info ) ++{ ++ struct fast_sync_cache_entry stack_cache, *cache; ++ NTSTATUS ret; ++ ++ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_AUTO_EVENT, ++ EVENT_QUERY_STATE, &stack_cache, &cache ))) ++ return ret; ++ ++ ret = linux_query_event_obj( cache->fd, cache->type, info ); ++ ++ release_fast_sync_obj( cache ); ++ return ret; ++} ++ ++ ++static NTSTATUS linux_release_mutex_obj( int obj, LONG *prev_count ) ++{ ++ struct ntsync_mutex_args args = {0}; ++ NTSTATUS ret; ++ ++ args.owner = GetCurrentThreadId(); ++ ret = ioctl( obj, NTSYNC_IOC_MUTEX_UNLOCK, &args ); ++ ++ if (ret < 0) ++ { ++ if (errno == EOVERFLOW) ++ return STATUS_MUTANT_LIMIT_EXCEEDED; ++ else if (errno == EPERM) ++ return STATUS_MUTANT_NOT_OWNED; ++ else ++ return errno_to_status( errno ); ++ } ++ if (prev_count) *prev_count = 1 - args.count; ++ return STATUS_SUCCESS; ++} ++ ++ ++static NTSTATUS fast_release_mutex( HANDLE handle, LONG *prev_count ) ++{ ++ struct fast_sync_cache_entry stack_cache, *cache; ++ NTSTATUS ret; ++ ++ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_MUTEX, 0, &stack_cache, &cache ))) ++ return ret; ++ ++ ret = linux_release_mutex_obj( cache->fd, prev_count ); ++ ++ release_fast_sync_obj( cache ); ++ return ret; ++} ++ ++ ++static NTSTATUS linux_query_mutex_obj( int obj, MUTANT_BASIC_INFORMATION *info ) ++{ ++ struct ntsync_mutex_args args = {0}; ++ NTSTATUS ret; ++ ++ ret = ioctl( obj, NTSYNC_IOC_MUTEX_READ, &args ); ++ ++ if (ret < 0) ++ { ++ if (errno == EOWNERDEAD) ++ { ++ info->AbandonedState = TRUE; ++ info->OwnedByCaller = FALSE; ++ info->CurrentCount = 1; ++ return STATUS_SUCCESS; ++ } ++ else ++ return errno_to_status( errno ); ++ } ++ info->AbandonedState = FALSE; ++ info->OwnedByCaller = (args.owner == GetCurrentThreadId()); ++ info->CurrentCount = 1 - args.count; ++ return STATUS_SUCCESS; ++} ++ ++ ++static NTSTATUS fast_query_mutex( HANDLE handle, MUTANT_BASIC_INFORMATION *info ) ++{ ++ struct fast_sync_cache_entry stack_cache, *cache; ++ NTSTATUS ret; ++ ++ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_MUTEX, MUTANT_QUERY_STATE, ++ &stack_cache, &cache ))) ++ return ret; ++ ++ ret = linux_query_mutex_obj( cache->fd, info ); ++ ++ release_fast_sync_obj( cache ); ++ return ret; ++} ++ ++static 
void select_queue( HANDLE queue ) ++{ ++ SERVER_START_REQ( fast_select_queue ) ++ { ++ req->handle = wine_server_obj_handle( queue ); ++ wine_server_call( req ); ++ } ++ SERVER_END_REQ; ++} ++ ++static void unselect_queue( HANDLE queue, BOOL signaled ) ++{ ++ SERVER_START_REQ( fast_unselect_queue ) ++ { ++ req->handle = wine_server_obj_handle( queue ); ++ req->signaled = signaled; ++ wine_server_call( req ); ++ } ++ SERVER_END_REQ; ++} ++ ++static int get_fast_alert_obj(void) ++{ ++ struct ntdll_thread_data *data = ntdll_get_thread_data(); ++ struct fast_sync_cache_entry stack_cache, *cache; ++ HANDLE alert_handle; ++ unsigned int ret; ++ ++ if (!data->fast_alert_obj) ++ { ++ SERVER_START_REQ( get_fast_alert_event ) ++ { ++ if ((ret = wine_server_call( req ))) ++ ERR( "failed to get fast alert event, status %#x\n", ret ); ++ alert_handle = wine_server_ptr_handle( reply->handle ); ++ } ++ SERVER_END_REQ; ++ ++ if ((ret = get_fast_sync_obj( alert_handle, 0, SYNCHRONIZE, &stack_cache, &cache ))) ++ ERR( "failed to get fast alert obj, status %#x\n", ret ); ++ data->fast_alert_obj = cache->fd; ++ /* Set the fd to -1 so release_fast_sync_obj() won't close it. ++ * Manhandling the cache entry here is fine since we're the only thread ++ * that can access our own alert event. */ ++ cache->fd = -1; ++ release_fast_sync_obj( cache ); ++ NtClose( alert_handle ); ++ } ++ ++ return data->fast_alert_obj; ++} ++ ++static NTSTATUS linux_wait_objs( int device, const DWORD count, const int *objs, ++ BOOLEAN wait_any, BOOLEAN alertable, const LARGE_INTEGER *timeout ) ++{ ++ struct ntsync_wait_args args = {0}; ++ unsigned long request; ++ struct timespec now; ++ int ret; ++ ++ if (!timeout || timeout->QuadPart == TIMEOUT_INFINITE) ++ { ++ args.timeout = ~(__u64)0; ++ } ++ else if (timeout->QuadPart <= 0) ++ { ++ clock_gettime( CLOCK_MONOTONIC, &now ); ++ args.timeout = (now.tv_sec * NSECPERSEC) + now.tv_nsec + (-timeout->QuadPart * 100); ++ } ++ else ++ { ++ args.timeout = (timeout->QuadPart * 100) - (SECS_1601_TO_1970 * NSECPERSEC); ++ args.flags |= NTSYNC_WAIT_REALTIME; ++ } ++ ++ args.objs = (uintptr_t)objs; ++ args.count = count; ++ args.owner = GetCurrentThreadId(); ++ args.index = ~0u; ++ ++ if (alertable) ++ args.alert = get_fast_alert_obj(); ++ ++ if (wait_any || count == 1) ++ request = NTSYNC_IOC_WAIT_ANY; ++ else ++ request = NTSYNC_IOC_WAIT_ALL; ++ ++ do ++ { ++ ret = ioctl( device, request, &args ); ++ } while (ret < 0 && errno == EINTR); ++ ++ if (!ret) ++ { ++ if (args.index == count) ++ { ++ static const LARGE_INTEGER timeout; ++ ++ ret = server_wait( NULL, 0, SELECT_INTERRUPTIBLE | SELECT_ALERTABLE, &timeout ); ++ assert( ret == STATUS_USER_APC ); ++ return ret; ++ } ++ ++ return wait_any ? args.index : 0; ++ } ++ else if (errno == EOWNERDEAD) ++ return STATUS_ABANDONED + (wait_any ? 
args.index : 0);
++    else if (errno == ETIMEDOUT)
++        return STATUS_TIMEOUT;
++    else
++        return errno_to_status( errno );
++}
++
++static NTSTATUS fast_wait( DWORD count, const HANDLE *handles, BOOLEAN wait_any,
++                           BOOLEAN alertable, const LARGE_INTEGER *timeout )
++{
++    struct fast_sync_cache_entry stack_cache[64], *cache[64];
++    int device, objs[64];
++    HANDLE queue = NULL;
++    NTSTATUS ret;
++    DWORD i, j;
++
++    if ((device = get_linux_sync_device()) < 0)
++        return STATUS_NOT_IMPLEMENTED;
++
++    for (i = 0; i < count; ++i)
++    {
++        if ((ret = get_fast_sync_obj( handles[i], 0, SYNCHRONIZE, &stack_cache[i], &cache[i] )))
++        {
++            for (j = 0; j < i; ++j)
++                release_fast_sync_obj( cache[j] );
++            return ret;
++        }
++        if (cache[i]->type == FAST_SYNC_QUEUE)
++            queue = handles[i];
++
++        objs[i] = cache[i]->fd;
++    }
++
++    /* It's common to wait on the message queue alone. Some applications wait
++     * on it in fast paths, with a zero timeout. Since the fast path costs two
++     * server calls (select_queue() and unselect_queue()) instead of one, and
++     * since we only need those calls if we're waiting on other objects as
++     * well, just delegate to the server if we're only waiting on the message
++     * queue. */
++    if (count == 1 && queue)
++    {
++        release_fast_sync_obj( cache[0] );
++        return server_wait_for_object( handles[0], alertable, timeout );
++    }
++
++    if (queue) select_queue( queue );
++
++    ret = linux_wait_objs( device, count, objs, wait_any, alertable, timeout );
++
++    if (queue) unselect_queue( queue, handles[ret] == queue );
++
++    for (i = 0; i < count; ++i)
++        release_fast_sync_obj( cache[i] );
++
++    return ret;
++}
++
++static NTSTATUS fast_signal_and_wait( HANDLE signal, HANDLE wait,
++                                      BOOLEAN alertable, const LARGE_INTEGER *timeout )
++{
++    struct fast_sync_cache_entry signal_stack_cache, *signal_cache;
++    struct fast_sync_cache_entry wait_stack_cache, *wait_cache;
++    HANDLE queue = NULL;
++    NTSTATUS ret;
++    int device;
++
++    if ((device = get_linux_sync_device()) < 0)
++        return STATUS_NOT_IMPLEMENTED;
++
++    if ((ret = get_fast_sync_obj( signal, 0, 0, &signal_stack_cache, &signal_cache )))
++        return ret;
++
++    switch (signal_cache->type)
++    {
++        case FAST_SYNC_SEMAPHORE:
++            if (!(signal_cache->access & SEMAPHORE_MODIFY_STATE))
++            {
++                release_fast_sync_obj( signal_cache );
++                return STATUS_ACCESS_DENIED;
++            }
++            break;
++
++        case FAST_SYNC_AUTO_EVENT:
++        case FAST_SYNC_MANUAL_EVENT:
++            if (!(signal_cache->access & EVENT_MODIFY_STATE))
++            {
++                release_fast_sync_obj( signal_cache );
++                return STATUS_ACCESS_DENIED;
++            }
++            break;
++
++        case FAST_SYNC_MUTEX:
++            break;
++
++        default:
++            /* can't be signaled */
++            release_fast_sync_obj( signal_cache );
++            return STATUS_OBJECT_TYPE_MISMATCH;
++    }
++
++    if ((ret = get_fast_sync_obj( wait, 0, SYNCHRONIZE, &wait_stack_cache, &wait_cache )))
++    {
++        release_fast_sync_obj( signal_cache );
++        return ret;
++    }
++
++    if (wait_cache->type == FAST_SYNC_QUEUE)
++        queue = wait;
++
++    switch (signal_cache->type)
++    {
++        case FAST_SYNC_SEMAPHORE:
++            ret = linux_release_semaphore_obj( signal_cache->fd, 1, NULL );
++            break;
++
++        case FAST_SYNC_AUTO_EVENT:
++        case FAST_SYNC_MANUAL_EVENT:
++            ret = linux_set_event_obj( signal_cache->fd, NULL );
++            break;
++
++        case FAST_SYNC_MUTEX:
++            ret = linux_release_mutex_obj( signal_cache->fd, NULL );
++            break;
++
++        default:
++            assert( 0 );
++            break;
++    }
++
++    if (!ret)
++    {
++        if (queue) select_queue( queue );
++        ret = linux_wait_objs( device, 1, &wait_cache->fd, TRUE, alertable, timeout );
++        if (queue) unselect_queue( queue, !ret );
++    }
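++
++    /* both references were taken in get_fast_sync_obj(); release them whether
++     * or not the signal or the wait succeeded */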
++ ++ release_fast_sync_obj( signal_cache ); ++ release_fast_sync_obj( wait_cache ); ++ return ret; ++} ++ ++#else ++ ++void close_fast_sync_obj( HANDLE handle ) ++{ ++} ++ ++static NTSTATUS fast_release_semaphore( HANDLE handle, ULONG count, ULONG *prev_count ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++static NTSTATUS fast_query_semaphore( HANDLE handle, SEMAPHORE_BASIC_INFORMATION *info ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++static NTSTATUS fast_set_event( HANDLE handle, LONG *prev_state ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++static NTSTATUS fast_reset_event( HANDLE handle, LONG *prev_state ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++static NTSTATUS fast_pulse_event( HANDLE handle, LONG *prev_state ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++static NTSTATUS fast_query_event( HANDLE handle, EVENT_BASIC_INFORMATION *info ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++static NTSTATUS fast_release_mutex( HANDLE handle, LONG *prev_count ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++static NTSTATUS fast_query_mutex( HANDLE handle, MUTANT_BASIC_INFORMATION *info ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++static NTSTATUS fast_wait( DWORD count, const HANDLE *handles, BOOLEAN wait_any, ++ BOOLEAN alertable, const LARGE_INTEGER *timeout ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++static NTSTATUS fast_signal_and_wait( HANDLE signal, HANDLE wait, ++ BOOLEAN alertable, const LARGE_INTEGER *timeout ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++#endif ++ ++ + /****************************************************************************** + * NtCreateSemaphore (NTDLL.@) + */ +@@ -249,13 +1152,13 @@ NTSTATUS WINAPI NtCreateSemaphore( HANDLE *handle, ACCESS_MASK access, const OBJ + data_size_t len; + struct object_attributes *objattr; + ++ TRACE( "access %#x, name %s, initial %d, max %d\n", (int)access, ++ attr ? debugstr_us(attr->ObjectName) : "(null)", (int)initial, (int)max ); ++ + *handle = 0; + if (max <= 0 || initial < 0 || initial > max) return STATUS_INVALID_PARAMETER; + if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; + +- if (do_esync()) +- return esync_create_semaphore( handle, access, attr, initial, max ); +- + SERVER_START_REQ( create_semaphore ) + { + req->access = access; +@@ -279,11 +1182,9 @@ NTSTATUS WINAPI NtOpenSemaphore( HANDLE *handle, ACCESS_MASK access, const OBJEC + { + unsigned int ret; + +- *handle = 0; +- +- if (do_esync()) +- return esync_open_semaphore( handle, access, attr ); ++ TRACE( "access %#x, name %s\n", (int)access, attr ? 
debugstr_us(attr->ObjectName) : "(null)" ); + ++ *handle = 0; + if ((ret = validate_open_object_attributes( attr ))) return ret; + + SERVER_START_REQ( open_semaphore ) +@@ -320,8 +1221,11 @@ NTSTATUS WINAPI NtQuerySemaphore( HANDLE handle, SEMAPHORE_INFORMATION_CLASS cla + + if (len != sizeof(SEMAPHORE_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH; + +- if (do_esync()) +- return esync_query_semaphore( handle, info, ret_len ); ++ if ((ret = fast_query_semaphore( handle, out )) != STATUS_NOT_IMPLEMENTED) ++ { ++ if (!ret && ret_len) *ret_len = sizeof(SEMAPHORE_BASIC_INFORMATION); ++ return ret; ++ } + + SERVER_START_REQ( query_semaphore ) + { +@@ -345,8 +1249,10 @@ NTSTATUS WINAPI NtReleaseSemaphore( HANDLE handle, ULONG count, ULONG *previous + { + unsigned int ret; + +- if (do_esync()) +- return esync_release_semaphore( handle, count, previous ); ++ TRACE( "handle %p, count %u, prev_count %p\n", handle, (int)count, previous ); ++ ++ if ((ret = fast_release_semaphore( handle, count, previous )) != STATUS_NOT_IMPLEMENTED) ++ return ret; + + SERVER_START_REQ( release_semaphore ) + { +@@ -372,12 +1278,11 @@ NTSTATUS WINAPI NtCreateEvent( HANDLE *handle, ACCESS_MASK access, const OBJECT_ + data_size_t len; + struct object_attributes *objattr; + ++ TRACE( "access %#x, name %s, type %u, state %u\n", (int)access, ++ attr ? debugstr_us(attr->ObjectName) : "(null)", type, state ); ++ + *handle = 0; + if (type != NotificationEvent && type != SynchronizationEvent) return STATUS_INVALID_PARAMETER; +- +- if (do_esync()) +- return esync_create_event( handle, access, attr, type, state ); +- + if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; + + SERVER_START_REQ( create_event ) +@@ -403,12 +1308,11 @@ NTSTATUS WINAPI NtOpenEvent( HANDLE *handle, ACCESS_MASK access, const OBJECT_AT + { + unsigned int ret; + ++ TRACE( "access %#x, name %s\n", (int)access, attr ? debugstr_us(attr->ObjectName) : "(null)" ); ++ + *handle = 0; + if ((ret = validate_open_object_attributes( attr ))) return ret; + +- if (do_esync()) +- return esync_open_event( handle, access, attr ); +- + SERVER_START_REQ( open_event ) + { + req->access = access; +@@ -429,11 +1333,12 @@ NTSTATUS WINAPI NtOpenEvent( HANDLE *handle, ACCESS_MASK access, const OBJECT_AT + */ + NTSTATUS WINAPI NtSetEvent( HANDLE handle, LONG *prev_state ) + { +- /* This comment is a dummy to make sure this patch applies in the right place. */ + unsigned int ret; + +- if (do_esync()) +- return esync_set_event( handle ); ++ TRACE( "handle %p, prev_state %p\n", handle, prev_state ); ++ ++ if ((ret = fast_set_event( handle, prev_state )) != STATUS_NOT_IMPLEMENTED) ++ return ret; + + SERVER_START_REQ( event_op ) + { +@@ -452,12 +1357,12 @@ NTSTATUS WINAPI NtSetEvent( HANDLE handle, LONG *prev_state ) + */ + NTSTATUS WINAPI NtResetEvent( HANDLE handle, LONG *prev_state ) + { +- /* This comment is a dummy to make sure this patch applies in the right place. 
*/ + unsigned int ret; + +- if (do_esync()) +- return esync_reset_event( handle ); ++ TRACE( "handle %p, prev_state %p\n", handle, prev_state ); + ++ if ((ret = fast_reset_event( handle, prev_state )) != STATUS_NOT_IMPLEMENTED) ++ return ret; + + SERVER_START_REQ( event_op ) + { +@@ -488,8 +1393,10 @@ NTSTATUS WINAPI NtPulseEvent( HANDLE handle, LONG *prev_state ) + { + unsigned int ret; + +- if (do_esync()) +- return esync_pulse_event( handle ); ++ TRACE( "handle %p, prev_state %p\n", handle, prev_state ); ++ ++ if ((ret = fast_pulse_event( handle, prev_state )) != STATUS_NOT_IMPLEMENTED) ++ return ret; + + SERVER_START_REQ( event_op ) + { +@@ -522,8 +1429,11 @@ NTSTATUS WINAPI NtQueryEvent( HANDLE handle, EVENT_INFORMATION_CLASS class, + + if (len != sizeof(EVENT_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH; + +- if (do_esync()) +- return esync_query_event( handle, info, ret_len ); ++ if ((ret = fast_query_event( handle, out )) != STATUS_NOT_IMPLEMENTED) ++ { ++ if (!ret && ret_len) *ret_len = sizeof(EVENT_BASIC_INFORMATION); ++ return ret; ++ } + + SERVER_START_REQ( query_event ) + { +@@ -550,11 +1460,10 @@ NTSTATUS WINAPI NtCreateMutant( HANDLE *handle, ACCESS_MASK access, const OBJECT + data_size_t len; + struct object_attributes *objattr; + +- *handle = 0; +- +- if (do_esync()) +- return esync_create_mutex( handle, access, attr, owned ); ++ TRACE( "access %#x, name %s, owned %u\n", (int)access, ++ attr ? debugstr_us(attr->ObjectName) : "(null)", owned ); + ++ *handle = 0; + if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; + + SERVER_START_REQ( create_mutex ) +@@ -579,12 +1488,11 @@ NTSTATUS WINAPI NtOpenMutant( HANDLE *handle, ACCESS_MASK access, const OBJECT_A + { + unsigned int ret; + ++ TRACE( "access %#x, name %s\n", (int)access, attr ? debugstr_us(attr->ObjectName) : "(null)" ); ++ + *handle = 0; + if ((ret = validate_open_object_attributes( attr ))) return ret; + +- if (do_esync()) +- return esync_open_mutex( handle, access, attr ); +- + SERVER_START_REQ( open_mutex ) + { + req->access = access; +@@ -607,8 +1515,10 @@ NTSTATUS WINAPI NtReleaseMutant( HANDLE handle, LONG *prev_count ) + { + unsigned int ret; + +- if (do_esync()) +- return esync_release_mutex( handle, prev_count ); ++ TRACE( "handle %p, prev_count %p\n", handle, prev_count ); ++ ++ if ((ret = fast_release_mutex( handle, prev_count )) != STATUS_NOT_IMPLEMENTED) ++ return ret; + + SERVER_START_REQ( release_mutex ) + { +@@ -640,8 +1550,11 @@ NTSTATUS WINAPI NtQueryMutant( HANDLE handle, MUTANT_INFORMATION_CLASS class, + + if (len != sizeof(MUTANT_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH; + +- if (do_esync()) +- return esync_query_mutex( handle, info, ret_len ); ++ if ((ret = fast_query_mutex( handle, out )) != STATUS_NOT_IMPLEMENTED) ++ { ++ if (!ret && ret_len) *ret_len = sizeof(MUTANT_BASIC_INFORMATION); ++ return ret; ++ } + + SERVER_START_REQ( query_mutex ) + { +@@ -1350,6 +2263,9 @@ NTSTATUS WINAPI NtCreateTimer( HANDLE *handle, ACCESS_MASK access, const OBJECT_ + data_size_t len; + struct object_attributes *objattr; + ++ TRACE( "access %#x, name %s, type %u\n", (int)access, ++ attr ? 
debugstr_us(attr->ObjectName) : "(null)", type ); ++ + *handle = 0; + if (type != NotificationTimer && type != SynchronizationTimer) return STATUS_INVALID_PARAMETER; + if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; +@@ -1377,6 +2293,8 @@ NTSTATUS WINAPI NtOpenTimer( HANDLE *handle, ACCESS_MASK access, const OBJECT_AT + { + unsigned int ret; + ++ TRACE( "access %#x, name %s\n", (int)access, attr ? debugstr_us(attr->ObjectName) : "(null)" ); ++ + *handle = 0; + if ((ret = validate_open_object_attributes( attr ))) return ret; + +@@ -1430,6 +2348,8 @@ NTSTATUS WINAPI NtCancelTimer( HANDLE handle, BOOLEAN *state ) + { + unsigned int ret; + ++ TRACE( "handle %p, state %p\n", handle, state ); ++ + SERVER_START_REQ( cancel_timer ) + { + req->handle = wine_server_obj_handle( handle ); +@@ -1498,20 +2418,29 @@ NTSTATUS WINAPI NtWaitForMultipleObjects( DWORD count, const HANDLE *handles, BO + { + select_op_t select_op; + UINT i, flags = SELECT_INTERRUPTIBLE; ++ unsigned int ret; + + if (!count || count > MAXIMUM_WAIT_OBJECTS) return STATUS_INVALID_PARAMETER_1; + +- if (do_esync()) ++ if (TRACE_ON(sync)) + { +- NTSTATUS ret = esync_wait_objects( count, handles, wait_any, alertable, timeout ); +- if (ret != STATUS_NOT_IMPLEMENTED) +- return ret; ++ TRACE( "wait_any %u, alertable %u, handles {%p", wait_any, alertable, handles[0] ); ++ for (i = 1; i < count; i++) TRACE( ", %p", handles[i] ); ++ TRACE( "}, timeout %s\n", debugstr_timeout(timeout) ); ++ } ++ ++ if ((ret = fast_wait( count, handles, wait_any, alertable, timeout )) != STATUS_NOT_IMPLEMENTED) ++ { ++ TRACE( "-> %#x\n", ret ); ++ return ret; + } + + if (alertable) flags |= SELECT_ALERTABLE; + select_op.wait.op = wait_any ? SELECT_WAIT : SELECT_WAIT_ALL; + for (i = 0; i < count; i++) select_op.wait.handles[i] = wine_server_obj_handle( handles[i] ); +- return server_wait( &select_op, offsetof( select_op_t, wait.handles[count] ), flags, timeout ); ++ ret = server_wait( &select_op, offsetof( select_op_t, wait.handles[count] ), flags, timeout ); ++ TRACE( "-> %#x\n", ret ); ++ return ret; + } + + +@@ -1532,12 +2461,15 @@ NTSTATUS WINAPI NtSignalAndWaitForSingleObject( HANDLE signal, HANDLE wait, + { + select_op_t select_op; + UINT flags = SELECT_INTERRUPTIBLE; ++ NTSTATUS ret; + +- if (do_esync()) +- return esync_signal_and_wait( signal, wait, alertable, timeout ); ++ TRACE( "signal %p, wait %p, alertable %u, timeout %s\n", signal, wait, alertable, debugstr_timeout(timeout) ); + + if (!signal) return STATUS_INVALID_HANDLE; + ++ if ((ret = fast_signal_and_wait( signal, wait, alertable, timeout )) != STATUS_NOT_IMPLEMENTED) ++ return ret; ++ + if (alertable) flags |= SELECT_ALERTABLE; + select_op.signal_and_wait.op = SELECT_SIGNAL_AND_WAIT; + select_op.signal_and_wait.wait = wine_server_obj_handle( wait ); +@@ -1760,6 +2692,9 @@ NTSTATUS WINAPI NtCreateKeyedEvent( HANDLE *handle, ACCESS_MASK access, + data_size_t len; + struct object_attributes *objattr; + ++ TRACE( "access %#x, name %s, flags %#x\n", (int)access, ++ attr ? debugstr_us(attr->ObjectName) : "(null)", (int)flags ); ++ + *handle = 0; + if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; + +@@ -1784,6 +2719,8 @@ NTSTATUS WINAPI NtOpenKeyedEvent( HANDLE *handle, ACCESS_MASK access, const OBJE + { + unsigned int ret; + ++ TRACE( "access %#x, name %s\n", (int)access, attr ? 
debugstr_us(attr->ObjectName) : "(null)" ); ++ + *handle = 0; + if ((ret = validate_open_object_attributes( attr ))) return ret; + +@@ -1810,6 +2747,8 @@ NTSTATUS WINAPI NtWaitForKeyedEvent( HANDLE handle, const void *key, + select_op_t select_op; + UINT flags = SELECT_INTERRUPTIBLE; + ++ TRACE( "handle %p, key %p, alertable %u, timeout %s\n", handle, key, alertable, debugstr_timeout(timeout) ); ++ + if (!handle) handle = keyed_event; + if ((ULONG_PTR)key & 1) return STATUS_INVALID_PARAMETER_1; + if (alertable) flags |= SELECT_ALERTABLE; +@@ -1829,6 +2768,8 @@ NTSTATUS WINAPI NtReleaseKeyedEvent( HANDLE handle, const void *key, + select_op_t select_op; + UINT flags = SELECT_INTERRUPTIBLE; + ++ TRACE( "handle %p, key %p, alertable %u, timeout %s\n", handle, key, alertable, debugstr_timeout(timeout) ); ++ + if (!handle) handle = keyed_event; + if ((ULONG_PTR)key & 1) return STATUS_INVALID_PARAMETER_1; + if (alertable) flags |= SELECT_ALERTABLE; +diff --git a/dlls/ntdll/unix/thread.c b/dlls/ntdll/unix/thread.c +index 7f2f1b9..3f706f4 100644 +--- a/dlls/ntdll/unix/thread.c ++++ b/dlls/ntdll/unix/thread.c +@@ -1776,7 +1776,7 @@ NTSTATUS get_thread_context( HANDLE handle, void *context, BOOL *self, USHORT ma + + if (ret == STATUS_PENDING) + { +- NtWaitForSingleObject( context_handle, FALSE, NULL ); ++ server_wait_for_object( context_handle, FALSE, NULL ); + + SERVER_START_REQ( get_thread_context ) + { +diff --git a/dlls/ntdll/unix/unix_private.h b/dlls/ntdll/unix/unix_private.h +index 4e5b282..fc5b259 100644 +--- a/dlls/ntdll/unix/unix_private.h ++++ b/dlls/ntdll/unix/unix_private.h +@@ -93,7 +93,6 @@ struct ntdll_thread_data + { + void *cpu_data[16]; /* reserved for CPU-specific data */ + void *kernel_stack; /* stack for thread startup and kernel syscalls */ +- int esync_apc_fd; /* fd to wait on for user APCs */ + int request_fd; /* fd for sending server requests */ + int reply_fd; /* fd for receiving server replies */ + int wait_fd[2]; /* fd for sleeping server requests */ +@@ -102,6 +101,7 @@ struct ntdll_thread_data + PRTL_THREAD_START_ROUTINE start; /* thread entry point */ + void *param; /* thread entry point parameter */ + void *jmp_buf; /* setjmp buffer for exception handling */ ++ int fast_alert_obj; /* fd for the fast alert event */ + }; + + C_ASSERT( sizeof(struct ntdll_thread_data) <= sizeof(((TEB *)0)->GdiTebBatch) ); +@@ -196,6 +196,8 @@ extern NTSTATUS load_start_exe( WCHAR **image, void **module ); + extern ULONG_PTR redirect_arm64ec_rva( void *module, ULONG_PTR rva, const IMAGE_ARM64EC_METADATA *metadata ); + extern void start_server( BOOL debug ); + ++extern pthread_mutex_t fd_cache_mutex; ++ + extern unsigned int server_call_unlocked( void *req_ptr ); + extern void server_enter_uninterrupted_section( pthread_mutex_t *mutex, sigset_t *sigset ); + extern void server_leave_uninterrupted_section( pthread_mutex_t *mutex, sigset_t *sigset ); +@@ -203,6 +205,7 @@ extern unsigned int server_select( const select_op_t *select_op, data_size_t siz + timeout_t abs_timeout, context_t *context, user_apc_t *user_apc ); + extern unsigned int server_wait( const select_op_t *select_op, data_size_t size, UINT flags, + const LARGE_INTEGER *timeout ); ++extern unsigned int server_wait_for_object( HANDLE handle, BOOL alertable, const LARGE_INTEGER *timeout ); + extern unsigned int server_queue_process_apc( HANDLE process, const apc_call_t *call, + apc_result_t *result ); + extern int server_get_unix_fd( HANDLE handle, unsigned int wanted_access, int *unix_fd, +@@ -353,6 +356,8 @@ extern NTSTATUS 
wow64_wine_spawnvp( void *args ); + + extern void dbg_init(void); + ++extern void close_fast_sync_obj( HANDLE handle ); ++ + extern NTSTATUS call_user_apc_dispatcher( CONTEXT *context_ptr, ULONG_PTR arg1, ULONG_PTR arg2, ULONG_PTR arg3, + PNTAPCFUNC func, NTSTATUS status ); + extern NTSTATUS call_user_exception_dispatcher( EXCEPTION_RECORD *rec, CONTEXT *context ); +@@ -361,6 +366,7 @@ extern void call_raise_user_exception_dispatcher(void); + #define IMAGE_DLLCHARACTERISTICS_PREFER_NATIVE 0x0010 /* Wine extension */ + + #define TICKSPERSEC 10000000 ++#define NSECPERSEC 1000000000 + #define SECS_1601_TO_1970 ((369 * 365 + 89) * (ULONGLONG)86400) + + static inline ULONGLONG ticks_from_time_t( time_t time ) +@@ -419,7 +425,7 @@ static inline async_data_t server_async( HANDLE handle, struct async_fileio *use + + static inline NTSTATUS wait_async( HANDLE handle, BOOL alertable ) + { +- return NtWaitForSingleObject( handle, alertable, NULL ); ++ return server_wait_for_object( handle, alertable, NULL ); + } + + static inline BOOL in_wow64_call(void) +diff --git a/dlls/ntdll/unix/virtual.c b/dlls/ntdll/unix/virtual.c +index 1e1af8c..a54ac45 100644 +--- a/dlls/ntdll/unix/virtual.c ++++ b/dlls/ntdll/unix/virtual.c +@@ -3712,7 +3712,6 @@ static TEB *init_teb( void *ptr, BOOL is_wow ) + teb->StaticUnicodeString.Buffer = teb->StaticUnicodeBuffer; + teb->StaticUnicodeString.MaximumLength = sizeof(teb->StaticUnicodeBuffer); + thread_data = (struct ntdll_thread_data *)&teb->GdiTebBatch; +- thread_data->esync_apc_fd = -1; + thread_data->request_fd = -1; + thread_data->reply_fd = -1; + thread_data->wait_fd[0] = -1; +diff --git a/dlls/rpcrt4/rpc_server.c b/dlls/rpcrt4/rpc_server.c +index 02193b8..eb733f6 100644 +--- a/dlls/rpcrt4/rpc_server.c ++++ b/dlls/rpcrt4/rpc_server.c +@@ -701,6 +701,10 @@ static DWORD CALLBACK RPCRT4_server_thread(LPVOID the_arg) + } + LeaveCriticalSection(&cps->cs); + ++ EnterCriticalSection(&listen_cs); ++ CloseHandle(cps->server_thread); ++ cps->server_thread = NULL; ++ LeaveCriticalSection(&listen_cs); + TRACE("done\n"); + return 0; + } +@@ -1566,10 +1570,7 @@ RPC_STATUS WINAPI RpcMgmtWaitServerListen( void ) + LIST_FOR_EACH_ENTRY(protseq, &protseqs, RpcServerProtseq, entry) + { + if ((wait_thread = protseq->server_thread)) +- { +- protseq->server_thread = NULL; + break; +- } + } + LeaveCriticalSection(&server_cs); + if (!wait_thread) +@@ -1578,7 +1579,6 @@ RPC_STATUS WINAPI RpcMgmtWaitServerListen( void ) + TRACE("waiting for thread %lu\n", GetThreadId(wait_thread)); + LeaveCriticalSection(&listen_cs); + WaitForSingleObject(wait_thread, INFINITE); +- CloseHandle(wait_thread); + EnterCriticalSection(&listen_cs); + } + if (listen_done_event == event) +diff --git a/dlls/webservices/tests/channel.c b/dlls/webservices/tests/channel.c +index c64027f..ddcf896 100644 +--- a/dlls/webservices/tests/channel.c ++++ b/dlls/webservices/tests/channel.c +@@ -1214,6 +1214,9 @@ static const char send_record_begin[] = { + static const char send_record_middle[] = { 0x01, 0x56, 0x0e, 0x42 }; + static const char send_record_end[] = { 0x08, 0x02, 0x6e, 0x73, 0x89, 0xff, 0x01, 0x01 }; + ++#pragma GCC diagnostic ignored "-Warray-bounds" ++#pragma GCC diagnostic ignored "-Wstringop-overflow" ++ + static BOOL send_dict_str( int sock, char *addr, const char *str, int dict_str_count ) + { + char buf[512], dict_buf[256], body_buf[128], dict_size_buf[5]; +diff --git a/include/config.h.in b/include/config.h.in +index a53870f..ba0bdbd 100644 +--- a/include/config.h.in ++++ b/include/config.h.in +@@ -177,6 +177,9 
@@
+ /* Define to 1 if you have the <linux/major.h> header file. */
+ #undef HAVE_LINUX_MAJOR_H
+
++/* Define to 1 if you have the <linux/ntsync.h> header file. */
++#undef HAVE_LINUX_NTSYNC_H
++
+ /* Define to 1 if you have the <linux/param.h> header file. */
+ #undef HAVE_LINUX_PARAM_H
+
+@@ -522,9 +525,6 @@
+ /* Define to 1 if you have the <sys/epoll.h> header file. */
+ #undef HAVE_SYS_EPOLL_H
+
+-/* Define to 1 if you have the <sys/eventfd.h> header file. */
+-#undef HAVE_SYS_EVENTFD_H
+-
+ /* Define to 1 if you have the <sys/event.h> header file. */
+ #undef HAVE_SYS_EVENT_H
+
+diff --git a/include/wine/server_protocol.h b/include/wine/server_protocol.h
+index 34655d1..1f8e10a 100644
+--- a/include/wine/server_protocol.h
++++ b/include/wine/server_protocol.h
+@@ -5634,6 +5634,88 @@ struct get_next_thread_reply
+ };
+
+
++enum fast_sync_type
++{
++ FAST_SYNC_SEMAPHORE = 1,
++ FAST_SYNC_MUTEX,
++ FAST_SYNC_AUTO_EVENT,
++ FAST_SYNC_MANUAL_EVENT,
++ FAST_SYNC_AUTO_SERVER,
++ FAST_SYNC_MANUAL_SERVER,
++ FAST_SYNC_QUEUE,
++};
++
++
++
++struct get_linux_sync_device_request
++{
++ struct request_header __header;
++ char __pad_12[4];
++};
++struct get_linux_sync_device_reply
++{
++ struct reply_header __header;
++ obj_handle_t handle;
++ char __pad_12[4];
++};
++
++
++
++struct get_linux_sync_obj_request
++{
++ struct request_header __header;
++ obj_handle_t handle;
++};
++struct get_linux_sync_obj_reply
++{
++ struct reply_header __header;
++ obj_handle_t handle;
++ int type;
++ unsigned int access;
++ char __pad_20[4];
++};
++
++
++
++struct fast_select_queue_request
++{
++ struct request_header __header;
++ obj_handle_t handle;
++};
++struct fast_select_queue_reply
++{
++ struct reply_header __header;
++};
++
++
++
++struct fast_unselect_queue_request
++{
++ struct request_header __header;
++ obj_handle_t handle;
++ int signaled;
++ char __pad_20[4];
++};
++struct fast_unselect_queue_reply
++{
++ struct reply_header __header;
++};
++
++
++
++struct get_fast_alert_event_request
++{
++ struct request_header __header;
++ char __pad_12[4];
++};
++struct get_fast_alert_event_reply
++{
++ struct reply_header __header;
++ obj_handle_t handle;
++ char __pad_12[4];
++};
++
++
+ enum request
+ {
+ REQ_new_process,
+@@ -5921,12 +6003,12 @@ enum request
+ REQ_suspend_process,
+ REQ_resume_process,
+ REQ_get_next_thread,
+- REQ_create_esync,
+- REQ_open_esync,
+- REQ_get_esync_fd,
+- REQ_esync_msgwait,
++ REQ_get_linux_sync_device,
++ REQ_get_linux_sync_obj,
++ REQ_fast_select_queue,
++ REQ_fast_unselect_queue,
+ REQ_set_keyboard_repeat,
+- REQ_get_esync_apc_fd,
++ REQ_get_fast_alert_event,
+ REQ_NB_REQUESTS
+ };
+
+@@ -6213,12 +6295,12 @@ union generic_request
+ struct suspend_process_request suspend_process_request;
+ struct resume_process_request resume_process_request;
+ struct get_next_thread_request get_next_thread_request;
+- struct create_esync_request create_esync_request;
+- struct open_esync_request open_esync_request;
+- struct get_esync_fd_request get_esync_fd_request;
+- struct esync_msgwait_request esync_msgwait_request;
++ struct get_linux_sync_device_request get_linux_sync_device_request;
++ struct get_linux_sync_obj_request get_linux_sync_obj_request;
++ struct fast_select_queue_request fast_select_queue_request;
++ struct fast_unselect_queue_request fast_unselect_queue_request;
+ struct set_keyboard_repeat_request set_keyboard_repeat_request;
+- struct get_esync_apc_fd_request get_esync_apc_fd_request;
++ struct get_fast_alert_event_request get_fast_alert_event_request;
+ };
+ union generic_reply
+ {
+@@ -6503,12 +6585,12 @@ union generic_reply
+ struct 
suspend_process_reply suspend_process_reply; + struct resume_process_reply resume_process_reply; + struct get_next_thread_reply get_next_thread_reply; +- struct create_esync_reply create_esync_reply; +- struct open_esync_reply open_esync_reply; +- struct get_esync_fd_reply get_esync_fd_reply; +- struct esync_msgwait_reply esync_msgwait_reply; + struct set_keyboard_repeat_reply set_keyboard_repeat_reply; +- struct get_esync_apc_fd_reply get_esync_apc_fd_reply; ++ struct get_linux_sync_device_reply get_linux_sync_device_reply; ++ struct get_linux_sync_obj_reply get_linux_sync_obj_reply; ++ struct fast_select_queue_reply fast_select_queue_reply; ++ struct fast_unselect_queue_reply fast_unselect_queue_reply; ++ struct get_fast_alert_event_reply get_fast_alert_event_reply; + }; + + /* ### protocol_version begin ### */ +diff --git a/server/Makefile.in b/server/Makefile.in +index b164193..b30df66 100644 +--- a/server/Makefile.in ++++ b/server/Makefile.in +@@ -11,8 +11,8 @@ SOURCES = \ + debugger.c \ + device.c \ + directory.c \ +- esync.c \ + event.c \ ++ fast_sync.c \ + fd.c \ + file.c \ + handle.c \ +diff --git a/server/async.c b/server/async.c +index b0f9fe4..02fb966 100644 +--- a/server/async.c ++++ b/server/async.c +@@ -77,7 +77,6 @@ static const struct object_ops async_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + async_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + async_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -90,6 +89,7 @@ static const struct object_ops async_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + async_destroy /* destroy */ + }; +@@ -687,7 +687,6 @@ static const struct object_ops iosb_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -700,6 +699,7 @@ static const struct object_ops iosb_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + iosb_destroy /* destroy */ + }; +diff --git a/server/atom.c b/server/atom.c +index d9824de..ba320c4 100644 +--- a/server/atom.c ++++ b/server/atom.c +@@ -79,7 +79,6 @@ static const struct object_ops atom_table_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -92,6 +91,7 @@ static const struct object_ops atom_table_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + atom_table_destroy /* destroy */ + }; +diff --git a/server/change.c b/server/change.c +index ec61711..dafa7e8 100644 +--- a/server/change.c ++++ b/server/change.c +@@ -112,7 +112,6 @@ static const struct object_ops dir_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ +- default_fd_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + dir_get_fd, /* get_fd */ +@@ -125,6 +124,7 @@ static const struct object_ops dir_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* 
get_kernel_obj_list */ ++ default_fd_get_fast_sync, /* get_fast_sync */ + dir_close_handle, /* close_handle */ + dir_destroy /* destroy */ + }; +diff --git a/server/clipboard.c b/server/clipboard.c +index 8b265f2..de9f84f 100644 +--- a/server/clipboard.c ++++ b/server/clipboard.c +@@ -76,7 +76,6 @@ static const struct object_ops clipboard_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -89,6 +88,7 @@ static const struct object_ops clipboard_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + clipboard_destroy /* destroy */ + }; +diff --git a/server/completion.c b/server/completion.c +index 3d4be86..5ec6d20 100644 +--- a/server/completion.c ++++ b/server/completion.c +@@ -61,10 +61,12 @@ struct completion + struct object obj; + struct list queue; + unsigned int depth; ++ struct fast_sync *fast_sync; + }; + + static void completion_dump( struct object*, int ); + static int completion_signaled( struct object *obj, struct wait_queue_entry *entry ); ++static struct fast_sync *completion_get_fast_sync( struct object *obj ); + static void completion_destroy( struct object * ); + + static const struct object_ops completion_ops = +@@ -75,7 +77,6 @@ static const struct object_ops completion_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + completion_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -88,6 +89,7 @@ static const struct object_ops completion_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ completion_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + completion_destroy /* destroy */ + }; +@@ -110,6 +112,7 @@ static void completion_destroy( struct object *obj) + { + free( tmp ); + } ++ if (completion->fast_sync) release_object( completion->fast_sync ); + } + + static void completion_dump( struct object *obj, int verbose ) +@@ -127,6 +130,16 @@ static int completion_signaled( struct object *obj, struct wait_queue_entry *ent + return !list_empty( &completion->queue ); + } + ++static struct fast_sync *completion_get_fast_sync( struct object *obj ) ++{ ++ struct completion *completion = (struct completion *)obj; ++ ++ if (!completion->fast_sync) ++ completion->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, !list_empty( &completion->queue ) ); ++ if (completion->fast_sync) grab_object( completion->fast_sync ); ++ return completion->fast_sync; ++} ++ + static struct completion *create_completion( struct object *root, const struct unicode_str *name, + unsigned int attr, unsigned int concurrent, + const struct security_descriptor *sd ) +@@ -139,6 +152,7 @@ static struct completion *create_completion( struct object *root, const struct u + { + list_init( &completion->queue ); + completion->depth = 0; ++ completion->fast_sync = NULL; + } + } + +@@ -166,6 +180,7 @@ void add_completion( struct completion *completion, apc_param_t ckey, apc_param_ + list_add_tail( &completion->queue, &msg->queue_entry ); + completion->depth++; + wake_up( &completion->obj, 1 ); ++ fast_set_event( completion->fast_sync ); + } + + /* create a completion */ +@@ -232,6 +247,8 @@ DECL_HANDLER(remove_completion) + 
reply->status = msg->status; + reply->information = msg->information; + free( msg ); ++ if (list_empty( &completion->queue )) ++ fast_reset_event( completion->fast_sync ); + } + + release_object( completion ); +diff --git a/server/console.c b/server/console.c +index dbd4a97..17708df 100644 +--- a/server/console.c ++++ b/server/console.c +@@ -41,7 +41,6 @@ + #include "wincon.h" + #include "winternl.h" + #include "wine/condrv.h" +-#include "esync.h" + + struct screen_buffer; + +@@ -62,6 +61,7 @@ struct console + struct fd *fd; /* for bare console, attached input fd */ + struct async_queue ioctl_q; /* ioctl queue */ + struct async_queue read_q; /* read queue */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + static void console_dump( struct object *obj, int verbose ); +@@ -73,6 +73,7 @@ static struct object *console_lookup_name( struct object *obj, struct unicode_st + static struct object *console_open_file( struct object *obj, unsigned int access, + unsigned int sharing, unsigned int options ); + static int console_add_queue( struct object *obj, struct wait_queue_entry *entry ); ++static struct fast_sync *console_get_fast_sync( struct object *obj ); + + static const struct object_ops console_ops = + { +@@ -82,7 +83,6 @@ static const struct object_ops console_ops = + console_add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + console_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + console_get_fd, /* get_fd */ +@@ -95,6 +95,7 @@ static const struct object_ops console_ops = + NULL, /* unlink_name */ + console_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ console_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + console_destroy /* destroy */ + }; +@@ -132,27 +133,27 @@ struct console_host_ioctl + + struct console_server + { +- struct object obj; /* object header */ +- struct fd *fd; /* pseudo-fd for ioctls */ +- struct console *console; /* attached console */ +- struct list queue; /* ioctl queue */ +- struct list read_queue; /* blocking read queue */ ++ struct object obj; /* object header */ ++ struct fd *fd; /* pseudo-fd for ioctls */ ++ struct console *console; /* attached console */ ++ struct list queue; /* ioctl queue */ ++ struct list read_queue; /* blocking read queue */ + unsigned int busy : 1; /* flag if server processing an ioctl */ + unsigned int once_input : 1; /* flag if input thread has already been requested */ +- int term_fd; /* UNIX terminal fd */ +- struct termios termios; /* original termios */ +- int esync_fd; ++ int term_fd; /* UNIX terminal fd */ ++ struct termios termios; /* original termios */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + static void console_server_dump( struct object *obj, int verbose ); + static void console_server_destroy( struct object *obj ); + static int console_server_signaled( struct object *obj, struct wait_queue_entry *entry ); +-static int console_server_get_esync_fd( struct object *obj, enum esync_type *type ); + static struct fd *console_server_get_fd( struct object *obj ); + static struct object *console_server_lookup_name( struct object *obj, struct unicode_str *name, + unsigned int attr, struct object *root ); + static struct object *console_server_open_file( struct object *obj, unsigned int access, + unsigned int sharing, unsigned int options ); ++static struct fast_sync *console_server_get_fast_sync( struct object *obj ); + + static const struct 
object_ops console_server_ops = + { +@@ -162,7 +163,6 @@ static const struct object_ops console_server_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + console_server_signaled, /* signaled */ +- console_server_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + console_server_get_fd, /* get_fd */ +@@ -175,6 +175,7 @@ static const struct object_ops console_server_ops = + NULL, /* unlink_name */ + console_server_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ console_server_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + console_server_destroy /* destroy */ + }; +@@ -223,6 +224,7 @@ static int screen_buffer_add_queue( struct object *obj, struct wait_queue_entry + static struct fd *screen_buffer_get_fd( struct object *obj ); + static struct object *screen_buffer_open_file( struct object *obj, unsigned int access, + unsigned int sharing, unsigned int options ); ++static struct fast_sync *screen_buffer_get_fast_sync( struct object *obj ); + + static const struct object_ops screen_buffer_ops = + { +@@ -232,7 +234,6 @@ static const struct object_ops screen_buffer_ops = + screen_buffer_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + screen_buffer_get_fd, /* get_fd */ +@@ -245,6 +246,7 @@ static const struct object_ops screen_buffer_ops = + NULL, /* unlink_name */ + screen_buffer_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ screen_buffer_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + screen_buffer_destroy /* destroy */ + }; +@@ -282,7 +284,6 @@ static const struct object_ops console_device_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -295,6 +296,7 @@ static const struct object_ops console_device_ops = + default_unlink_name, /* unlink_name */ + console_device_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + no_destroy /* destroy */ + }; +@@ -310,6 +312,7 @@ static struct object *console_input_open_file( struct object *obj, unsigned int + unsigned int sharing, unsigned int options ); + static int console_input_add_queue( struct object *obj, struct wait_queue_entry *entry ); + static struct fd *console_input_get_fd( struct object *obj ); ++static struct fast_sync *console_input_get_fast_sync( struct object *obj ); + static void console_input_destroy( struct object *obj ); + + static const struct object_ops console_input_ops = +@@ -320,7 +323,6 @@ static const struct object_ops console_input_ops = + console_input_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + console_input_get_fd, /* get_fd */ +@@ -333,6 +335,7 @@ static const struct object_ops console_input_ops = + default_unlink_name, /* unlink_name */ + console_input_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ console_input_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + console_input_destroy /* destroy */ + }; +@@ -368,6 +371,7 @@ static int console_output_add_queue( struct object *obj, struct wait_queue_entry + static struct 
fd *console_output_get_fd( struct object *obj ); + static struct object *console_output_open_file( struct object *obj, unsigned int access, + unsigned int sharing, unsigned int options ); ++static struct fast_sync *console_output_get_fast_sync( struct object *obj ); + static void console_output_destroy( struct object *obj ); + + static const struct object_ops console_output_ops = +@@ -378,7 +382,6 @@ static const struct object_ops console_output_ops = + console_output_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + console_output_get_fd, /* get_fd */ +@@ -391,6 +394,7 @@ static const struct object_ops console_output_ops = + default_unlink_name, /* unlink_name */ + console_output_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ console_output_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + console_output_destroy /* destroy */ + }; +@@ -437,7 +441,6 @@ static const struct object_ops console_connection_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + console_connection_get_fd, /* get_fd */ +@@ -450,6 +453,7 @@ static const struct object_ops console_connection_ops = + default_unlink_name, /* unlink_name */ + console_connection_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + console_connection_close_handle, /* close_handle */ + console_connection_destroy /* destroy */ + }; +@@ -549,6 +553,7 @@ static struct object *create_console(void) + console->server = NULL; + console->fd = NULL; + console->last_id = 0; ++ console->fast_sync = NULL; + init_async_queue( &console->ioctl_q ); + init_async_queue( &console->read_q ); + +@@ -588,6 +593,7 @@ static int queue_host_ioctl( struct console_server *server, unsigned int code, u + } + list_add_tail( &server->queue, &ioctl->entry ); + wake_up( &server->obj, 0 ); ++ fast_set_event( server->fast_sync ); + if (async) set_error( STATUS_PENDING ); + return 1; + } +@@ -600,8 +606,6 @@ static void disconnect_console_server( struct console_server *server ) + list_remove( &call->entry ); + console_host_ioctl_terminate( call, STATUS_CANCELLED ); + } +- if (do_esync()) +- esync_clear( server->esync_fd ); + while (!list_empty( &server->read_queue )) + { + struct console_host_ioctl *call = LIST_ENTRY( list_head( &server->read_queue ), struct console_host_ioctl, entry ); +@@ -622,6 +626,7 @@ static void disconnect_console_server( struct console_server *server ) + server->console->server = NULL; + server->console = NULL; + wake_up( &server->obj, 0 ); ++ fast_set_event( server->fast_sync ); + } + } + +@@ -776,6 +781,8 @@ static void console_destroy( struct object *obj ) + free_async_queue( &console->read_q ); + if (console->fd) + release_object( console->fd ); ++ ++ if (console->fast_sync) release_object( console->fast_sync ); + } + + static struct object *create_console_connection( struct console *console ) +@@ -823,6 +830,16 @@ static struct object *console_open_file( struct object *obj, unsigned int access + return grab_object( obj ); + } + ++static struct fast_sync *console_get_fast_sync( struct object *obj ) ++{ ++ struct console *console = (struct console *)obj; ++ ++ if (!console->fast_sync) ++ console->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, console->signaled ); ++ if 
(console->fast_sync) grab_object( console->fast_sync );
++ return console->fast_sync;
++}
++
+ static void screen_buffer_dump( struct object *obj, int verbose )
+ {
+ struct screen_buffer *screen_buffer = (struct screen_buffer *)obj;
+@@ -872,6 +889,17 @@ static struct fd *screen_buffer_get_fd( struct object *obj )
+ return NULL;
+ }
+
++static struct fast_sync *screen_buffer_get_fast_sync( struct object *obj )
++{
++ struct screen_buffer *screen_buffer = (struct screen_buffer *)obj;
++ if (!screen_buffer->input)
++ {
++ set_error( STATUS_ACCESS_DENIED );
++ return NULL;
++ }
++ return console_get_fast_sync( &screen_buffer->input->obj );
++}
++
+ static void console_server_dump( struct object *obj, int verbose )
+ {
+ assert( obj->ops == &console_server_ops );
+@@ -884,7 +912,7 @@ static void console_server_destroy( struct object *obj )
+ assert( obj->ops == &console_server_ops );
+ disconnect_console_server( server );
+ if (server->fd) release_object( server->fd );
+- if (do_esync()) close( server->esync_fd );
++ if (server->fast_sync) release_object( server->fast_sync );
+ }
+
+ static struct object *console_server_lookup_name( struct object *obj, struct unicode_str *name,
+@@ -926,13 +954,6 @@ static int console_server_signaled( struct object *obj, struct wait_queue_entry
+ return !server->console || !list_empty( &server->queue );
+ }
+
+-static int console_server_get_esync_fd( struct object *obj, enum esync_type *type )
+-{
+- struct console_server *server = (struct console_server*)obj;
+- *type = ESYNC_MANUAL_SERVER;
+- return server->esync_fd;
+-}
+-
+ static struct fd *console_server_get_fd( struct object* obj )
+ {
+ struct console_server *server = (struct console_server*)obj;
+@@ -946,6 +967,17 @@ static struct object *console_server_open_file( struct object *obj, unsigned int
+ return grab_object( obj );
+ }
+
++static struct fast_sync *console_server_get_fast_sync( struct object *obj )
++{
++ struct console_server *server = (struct console_server *)obj;
++ int signaled = !server->console || !list_empty( &server->queue );
++
++ if (!server->fast_sync)
++ server->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, signaled );
++ if (server->fast_sync) grab_object( server->fast_sync );
++ return server->fast_sync;
++}
++
+ static struct object *create_console_server( void )
+ {
+ struct console_server *server;
+@@ -957,6 +989,7 @@ static struct object *create_console_server( void )
+ server->term_fd = -1;
+ list_init( &server->queue );
+ list_init( &server->read_queue );
++ server->fast_sync = NULL;
+ server->fd = alloc_pseudo_fd( &console_server_fd_ops, &server->obj, FILE_SYNCHRONOUS_IO_NONALERT );
+ if (!server->fd)
+ {
+@@ -964,10 +997,6 @@ static struct object *create_console_server( void )
+ return NULL;
+ }
+ allow_fd_caching(server->fd);
+- server->esync_fd = -1;
+-
+- if (do_esync())
+- server->esync_fd = esync_create_fd( 0, 0 );
+
+ return &server->obj;
+ }
+@@ -1426,6 +1455,16 @@ static struct object *console_input_open_file( struct object *obj, unsigned int
+ return grab_object( obj );
+ }
+
++static struct fast_sync *console_input_get_fast_sync( struct object *obj )
++{
++ if (!current->process->console)
++ {
++ set_error( STATUS_ACCESS_DENIED );
++ return NULL;
++ }
++ return console_get_fast_sync( &current->process->console->obj );
++}
++
+ static void console_input_destroy( struct object *obj )
+ {
+ struct console_input *console_input = (struct console_input *)obj;
+@@ -1498,6 +1537,16 @@ static struct object *console_output_open_file( struct object *obj, unsigned int 
+ return grab_object( obj );
+ }
+
++static struct fast_sync *console_output_get_fast_sync( struct object *obj )
++{
++ if (!current->process->console || !current->process->console->active)
++ {
++ set_error( STATUS_ACCESS_DENIED );
++ return NULL;
++ }
++ return console_get_fast_sync( &current->process->console->obj );
++}
++
+ static void console_output_destroy( struct object *obj )
+ {
+ struct console_output *console_output = (struct console_output *)obj;
+@@ -1555,11 +1604,16 @@ DECL_HANDLER(get_next_console_request)
+
+ if (!server->console->renderer) server->console->renderer = current;
+
+- if (!req->signal) server->console->signaled = 0;
++ if (!req->signal)
++ {
++ server->console->signaled = 0;
++ fast_reset_event( server->console->fast_sync );
++ }
+ else if (!server->console->signaled)
+ {
+ server->console->signaled = 1;
+ wake_up( &server->console->obj, 0 );
++ fast_set_event( server->console->fast_sync );
+ }
+
+ if (req->read)
+@@ -1581,8 +1635,8 @@ DECL_HANDLER(get_next_console_request)
+ /* set result of previous ioctl */
+ ioctl = LIST_ENTRY( list_head( &server->queue ), struct console_host_ioctl, entry );
+ list_remove( &ioctl->entry );
+- if (do_esync() && list_empty( &server->queue ))
+- esync_clear( server->esync_fd );
++ if (list_empty( &server->queue ))
++ fast_reset_event( server->fast_sync );
+ }
+
+ if (ioctl)
+@@ -1668,8 +1722,9 @@ DECL_HANDLER(get_next_console_request)
+ {
+ set_error( STATUS_PENDING );
+ }
+- if (do_esync() && list_empty( &server->queue ))
+- esync_clear( server->esync_fd );
++
++ if (list_empty( &server->queue ))
++ fast_reset_event( server->fast_sync );
+
+ release_object( server );
+ }
+diff --git a/server/debugger.c b/server/debugger.c
+index ca04d4c..7975fc4 100644
+--- a/server/debugger.c
++++ b/server/debugger.c
+@@ -71,6 +71,7 @@ struct debug_obj
+ struct object obj; /* object header */
+ struct list event_queue; /* pending events queue */
+ unsigned int flags; /* debug flags */
++ struct fast_sync *fast_sync; /* fast synchronization object */
+ };
+
+
+@@ -86,7 +87,6 @@ static const struct object_ops debug_event_ops =
+ add_queue, /* add_queue */
+ remove_queue, /* remove_queue */
+ debug_event_signaled, /* signaled */
+- NULL, /* get_esync_fd */
+ no_satisfied, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -99,12 +99,14 @@ static const struct object_ops debug_event_ops =
+ NULL, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ no_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ debug_event_destroy /* destroy */
+ };
+
+ static void debug_obj_dump( struct object *obj, int verbose );
+ static int debug_obj_signaled( struct object *obj, struct wait_queue_entry *entry );
++static struct fast_sync *debug_obj_get_fast_sync( struct object *obj );
+ static void debug_obj_destroy( struct object *obj );
+
+ static const struct object_ops debug_obj_ops =
+@@ -115,7 +117,6 @@ static const struct object_ops debug_obj_ops =
+ add_queue, /* add_queue */
+ remove_queue, /* remove_queue */
+ debug_obj_signaled, /* signaled */
+- NULL, /* get_esync_fd */
+ no_satisfied, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -128,6 +129,7 @@ static const struct object_ops debug_obj_ops =
+ default_unlink_name, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ debug_obj_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ debug_obj_destroy /* destroy */
+ };
+@@ 
-255,6 +257,7 @@ static void link_event( struct debug_obj *debug_obj, struct debug_event *event ) + /* grab reference since debugger could be killed while trying to wake up */ + grab_object( debug_obj ); + wake_up( &debug_obj->obj, 0 ); ++ fast_set_event( debug_obj->fast_sync ); + release_object( debug_obj ); + } + } +@@ -267,6 +270,7 @@ static void resume_event( struct debug_obj *debug_obj, struct debug_event *event + { + grab_object( debug_obj ); + wake_up( &debug_obj->obj, 0 ); ++ fast_set_event( debug_obj->fast_sync ); + release_object( debug_obj ); + } + } +@@ -332,6 +336,17 @@ static int debug_obj_signaled( struct object *obj, struct wait_queue_entry *entr + return find_event_to_send( debug_obj ) != NULL; + } + ++static struct fast_sync *debug_obj_get_fast_sync( struct object *obj ) ++{ ++ struct debug_obj *debug_obj = (struct debug_obj *)obj; ++ int signaled = find_event_to_send( debug_obj ) != NULL; ++ ++ if (!debug_obj->fast_sync) ++ debug_obj->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, signaled ); ++ if (debug_obj->fast_sync) grab_object( debug_obj->fast_sync ); ++ return debug_obj->fast_sync; ++} ++ + static void debug_obj_destroy( struct object *obj ) + { + struct list *ptr; +@@ -344,6 +359,8 @@ static void debug_obj_destroy( struct object *obj ) + /* free all pending events */ + while ((ptr = list_head( &debug_obj->event_queue ))) + unlink_event( debug_obj, LIST_ENTRY( ptr, struct debug_event, entry )); ++ ++ if (debug_obj->fast_sync) release_object( debug_obj->fast_sync ); + } + + struct debug_obj *get_debug_obj( struct process *process, obj_handle_t handle, unsigned int access ) +@@ -363,6 +380,7 @@ static struct debug_obj *create_debug_obj( struct object *root, const struct uni + { + debug_obj->flags = flags; + list_init( &debug_obj->event_queue ); ++ debug_obj->fast_sync = NULL; + } + } + return debug_obj; +@@ -571,6 +589,9 @@ DECL_HANDLER(wait_debug_event) + reply->tid = get_thread_id( event->sender ); + alloc_event_handles( event, current->process ); + set_reply_data( &event->data, min( get_reply_max_size(), sizeof(event->data) )); ++ ++ if (!find_event_to_send( debug_obj )) ++ fast_reset_event( debug_obj->fast_sync ); + } + else + { +diff --git a/server/device.c b/server/device.c +index c45d010..698fee6 100644 +--- a/server/device.c ++++ b/server/device.c +@@ -38,7 +38,6 @@ + #include "handle.h" + #include "request.h" + #include "process.h" +-#include "esync.h" + + /* IRP object */ + +@@ -67,7 +66,6 @@ static const struct object_ops irp_call_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -80,6 +78,7 @@ static const struct object_ops irp_call_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + irp_call_destroy /* destroy */ + }; +@@ -94,12 +93,12 @@ struct device_manager + struct list requests; /* list of pending irps across all devices */ + struct irp_call *current_call; /* call currently executed on client side */ + struct wine_rb_tree kernel_objects; /* map of objects that have client side pointer associated */ +- int esync_fd; /* esync file descriptor */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + static void device_manager_dump( struct object *obj, int verbose ); + static int device_manager_signaled( struct object *obj, struct 
wait_queue_entry *entry ); +-static int device_manager_get_esync_fd( struct object *obj, enum esync_type *type ); ++static struct fast_sync *device_manager_get_fast_sync( struct object *obj ); + static void device_manager_destroy( struct object *obj ); + + static const struct object_ops device_manager_ops = +@@ -110,7 +109,6 @@ static const struct object_ops device_manager_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + device_manager_signaled, /* signaled */ +- device_manager_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -123,6 +121,7 @@ static const struct object_ops device_manager_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ device_manager_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + device_manager_destroy /* destroy */ + }; +@@ -168,7 +167,6 @@ static const struct object_ops device_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -181,6 +179,7 @@ static const struct object_ops device_ops = + default_unlink_name, /* unlink_name */ + device_open_file, /* open_file */ + device_get_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + device_destroy /* destroy */ + }; +@@ -221,7 +220,6 @@ static const struct object_ops device_file_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + device_file_get_fd, /* get_fd */ +@@ -234,6 +232,7 @@ static const struct object_ops device_file_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + device_file_get_kernel_obj_list, /* get_kernel_obj_list */ ++ default_fd_get_fast_sync, /* get_fast_sync */ + device_file_close_handle, /* close_handle */ + device_file_destroy /* destroy */ + }; +@@ -424,7 +423,12 @@ static void add_irp_to_queue( struct device_manager *manager, struct irp_call *i + irp->thread = thread ? 
(struct thread *)grab_object( thread ) : NULL; + if (irp->file) list_add_tail( &irp->file->requests, &irp->dev_entry ); + list_add_tail( &manager->requests, &irp->mgr_entry ); +- if (list_head( &manager->requests ) == &irp->mgr_entry) wake_up( &manager->obj, 0 ); /* first one */ ++ if (list_head( &manager->requests ) == &irp->mgr_entry) ++ { ++ /* first one */ ++ wake_up( &manager->obj, 0 ); ++ fast_set_event( manager->fast_sync ); ++ } + } + + static struct object *device_open_file( struct object *obj, unsigned int access, +@@ -754,13 +758,13 @@ static void delete_file( struct device_file *file ) + /* terminate all pending requests */ + LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry ) + { +- if (do_esync() && file->device->manager && list_empty( &file->device->manager->requests )) +- esync_clear( file->device->manager->esync_fd ); +- + list_remove( &irp->mgr_entry ); + set_irp_result( irp, STATUS_FILE_DELETED, NULL, 0, 0 ); + } + ++ if (list_empty( &file->device->manager->requests )) ++ fast_reset_event( file->device->manager->fast_sync ); ++ + release_object( file ); + } + +@@ -792,11 +796,14 @@ static int device_manager_signaled( struct object *obj, struct wait_queue_entry + return !list_empty( &manager->requests ); + } + +-static int device_manager_get_esync_fd( struct object *obj, enum esync_type *type ) ++static struct fast_sync *device_manager_get_fast_sync( struct object *obj ) + { + struct device_manager *manager = (struct device_manager *)obj; +- *type = ESYNC_MANUAL_SERVER; +- return manager->esync_fd; ++ ++ if (!manager->fast_sync) ++ manager->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, !list_empty( &manager->requests ) ); ++ if (manager->fast_sync) grab_object( manager->fast_sync ); ++ return manager->fast_sync; + } + + static void device_manager_destroy( struct object *obj ) +@@ -834,8 +841,7 @@ static void device_manager_destroy( struct object *obj ) + release_object( irp ); + } + +- if (do_esync()) +- close( manager->esync_fd ); ++ if (manager->fast_sync) release_object( manager->fast_sync ); + } + + static struct device_manager *create_device_manager(void) +@@ -845,12 +851,10 @@ static struct device_manager *create_device_manager(void) + if ((manager = alloc_object( &device_manager_ops ))) + { + manager->current_call = NULL; ++ manager->fast_sync = NULL; + list_init( &manager->devices ); + list_init( &manager->requests ); + wine_rb_init( &manager->kernel_objects, compare_kernel_object ); +- +- if (do_esync()) +- manager->esync_fd = esync_create_fd( 0, 0 ); + } + return manager; + } +@@ -1037,12 +1041,13 @@ DECL_HANDLER(get_next_device_request) + } + list_remove( &irp->mgr_entry ); + list_init( &irp->mgr_entry ); ++ ++ if (list_empty( &manager->requests )) ++ fast_reset_event( manager->fast_sync ); ++ + /* we already own the object if it's only on manager queue */ + if (irp->file) grab_object( irp ); + manager->current_call = irp; +- +- if (do_esync() && list_empty( &manager->requests )) +- esync_clear( manager->esync_fd ); + } + else close_handle( current->process, reply->next ); + } +diff --git a/server/directory.c b/server/directory.c +index bc161b9..8e32abb 100644 +--- a/server/directory.c ++++ b/server/directory.c +@@ -69,7 +69,6 @@ static const struct object_ops object_type_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -82,6 +81,7 @@ static const struct object_ops 
object_type_ops =
+ default_unlink_name, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ no_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ no_destroy /* destroy */
+ };
+@@ -120,7 +120,6 @@ static const struct object_ops directory_ops =
+ no_add_queue, /* add_queue */
+ NULL, /* remove_queue */
+ NULL, /* signaled */
+- NULL, /* get_esync_fd */
+ NULL, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -133,6 +132,7 @@ static const struct object_ops directory_ops =
+ default_unlink_name, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ no_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ directory_destroy /* destroy */
+ };
+diff --git a/server/esync.c b/server/esync.c
+deleted file mode 100644
+index e193f61..0000000
+--- a/server/esync.c
++++ /dev/null
+@@ -1,588 +0,0 @@
+-/*
+- * eventfd-based synchronization objects
+- *
+- * Copyright (C) 2018 Zebediah Figura
+- *
+- * This library is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU Lesser General Public
+- * License as published by the Free Software Foundation; either
+- * version 2.1 of the License, or (at your option) any later version.
+- *
+- * This library is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * Lesser General Public License for more details.
+- *
+- * You should have received a copy of the GNU Lesser General Public
+- * License along with this library; if not, write to the Free Software
+- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+- */
+-
+-#include "config.h"
+-
+-
+-#include <fcntl.h>
+-#include <stdio.h>
+-#include <stdlib.h>
+-#ifdef HAVE_SYS_EVENTFD_H
+-# include <sys/eventfd.h>
+-#endif
+-#include <sys/mman.h>
+-#ifdef HAVE_SYS_STAT_H
+-# include <sys/stat.h>
+-#endif
+-#include <unistd.h>
+-
+-#include "ntstatus.h"
+-#define WIN32_NO_STATUS
+-#include "windef.h"
+-#include "winternl.h"
+-
+-#include "handle.h"
+-#include "request.h"
+-#include "file.h"
+-#include "esync.h"
+-
+-int do_esync(void)
+-{
+-#ifdef HAVE_SYS_EVENTFD_H
+- static int do_esync_cached = -1;
+-
+- if (do_esync_cached == -1)
+- do_esync_cached = getenv("WINEESYNC") && atoi(getenv("WINEESYNC"));
+-
+- return do_esync_cached;
+-#else
+- return 0;
+-#endif
+-}
+-
+-static char shm_name[29];
+-static int shm_fd;
+-static off_t shm_size;
+-static void **shm_addrs;
+-static int shm_addrs_size; /* length of the allocated shm_addrs array */
+-static long pagesize;
+-
+-static void shm_cleanup(void)
+-{
+- close( shm_fd );
+- if (shm_unlink( shm_name ) == -1)
+- perror( "shm_unlink" );
+-}
+-
+-void esync_init(void)
+-{
+- struct stat st;
+-
+- if (fstat( config_dir_fd, &st ) == -1)
+- fatal_error( "cannot stat config dir\n" );
+-
+- if (st.st_ino != (unsigned long)st.st_ino)
+- sprintf( shm_name, "/wine-%lx%08lx-esync", (unsigned long)((unsigned long long)st.st_ino >> 32), (unsigned long)st.st_ino );
+- else
+- sprintf( shm_name, "/wine-%lx-esync", (unsigned long)st.st_ino );
+-
+- shm_unlink( shm_name );
+-
+- shm_fd = shm_open( shm_name, O_RDWR | O_CREAT | O_EXCL, 0644 );
+- if (shm_fd == -1)
+- perror( "shm_open" );
+-
+- pagesize = sysconf( _SC_PAGESIZE );
+-
+- shm_addrs = calloc( 128, sizeof(shm_addrs[0]) );
+- shm_addrs_size = 128;
+-
+- shm_size = pagesize;
+- if (ftruncate( shm_fd, shm_size ) == -1)
+- perror( "ftruncate" );
+-
+- 
fprintf( stderr, "esync: up and running.\n" ); +- +- atexit( shm_cleanup ); +-} +- +-static struct list mutex_list = LIST_INIT(mutex_list); +- +-struct esync +-{ +- struct object obj; /* object header */ +- int fd; /* eventfd file descriptor */ +- enum esync_type type; +- unsigned int shm_idx; /* index into the shared memory section */ +- struct list mutex_entry; /* entry in the mutex list (if applicable) */ +-}; +- +-static void esync_dump( struct object *obj, int verbose ); +-static int esync_get_esync_fd( struct object *obj, enum esync_type *type ); +-static unsigned int esync_map_access( struct object *obj, unsigned int access ); +-static void esync_destroy( struct object *obj ); +- +-const struct object_ops esync_ops = +-{ +- sizeof(struct esync), /* size */ +- &no_type, /* type */ +- esync_dump, /* dump */ +- no_add_queue, /* add_queue */ +- NULL, /* remove_queue */ +- NULL, /* signaled */ +- esync_get_esync_fd, /* get_esync_fd */ +- NULL, /* satisfied */ +- no_signal, /* signal */ +- no_get_fd, /* get_fd */ +- esync_map_access, /* map_access */ +- default_get_sd, /* get_sd */ +- default_set_sd, /* set_sd */ +- default_get_full_name, /* get_full_name */ +- no_lookup_name, /* lookup_name */ +- directory_link_name, /* link_name */ +- default_unlink_name, /* unlink_name */ +- no_open_file, /* open_file */ +- no_kernel_obj_list, /* get_kernel_obj_list */ +- no_close_handle, /* close_handle */ +- esync_destroy /* destroy */ +-}; +- +-static void esync_dump( struct object *obj, int verbose ) +-{ +- struct esync *esync = (struct esync *)obj; +- assert( obj->ops == &esync_ops ); +- fprintf( stderr, "esync fd=%d\n", esync->fd ); +-} +- +-static int esync_get_esync_fd( struct object *obj, enum esync_type *type ) +-{ +- struct esync *esync = (struct esync *)obj; +- *type = esync->type; +- return esync->fd; +-} +- +-static unsigned int esync_map_access( struct object *obj, unsigned int access ) +-{ +- /* Sync objects have the same flags. 
*/ +- if (access & GENERIC_READ) access |= STANDARD_RIGHTS_READ | EVENT_QUERY_STATE; +- if (access & GENERIC_WRITE) access |= STANDARD_RIGHTS_WRITE | EVENT_MODIFY_STATE; +- if (access & GENERIC_EXECUTE) access |= STANDARD_RIGHTS_EXECUTE | SYNCHRONIZE; +- if (access & GENERIC_ALL) access |= STANDARD_RIGHTS_ALL | EVENT_QUERY_STATE | EVENT_MODIFY_STATE; +- return access & ~(GENERIC_READ | GENERIC_WRITE | GENERIC_EXECUTE | GENERIC_ALL); +-} +- +-static void esync_destroy( struct object *obj ) +-{ +- struct esync *esync = (struct esync *)obj; +- if (esync->type == ESYNC_MUTEX) +- list_remove( &esync->mutex_entry ); +- close( esync->fd ); +-} +- +-static int type_matches( enum esync_type type1, enum esync_type type2 ) +-{ +- return (type1 == type2) || +- ((type1 == ESYNC_AUTO_EVENT || type1 == ESYNC_MANUAL_EVENT) && +- (type2 == ESYNC_AUTO_EVENT || type2 == ESYNC_MANUAL_EVENT)); +-} +- +-static void *get_shm( unsigned int idx ) +-{ +- int entry = (idx * 8) / pagesize; +- int offset = (idx * 8) % pagesize; +- +- if (entry >= shm_addrs_size) +- { +- int new_size = max(shm_addrs_size * 2, entry + 1); +- +- if (!(shm_addrs = realloc( shm_addrs, new_size * sizeof(shm_addrs[0]) ))) +- fprintf( stderr, "esync: couldn't expand shm_addrs array to size %d\n", entry + 1 ); +- +- memset( shm_addrs + shm_addrs_size, 0, (new_size - shm_addrs_size) * sizeof(shm_addrs[0]) ); +- +- shm_addrs_size = new_size; +- } +- +- if (!shm_addrs[entry]) +- { +- void *addr = mmap( NULL, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, entry * pagesize ); +- if (addr == (void *)-1) +- { +- fprintf( stderr, "esync: failed to map page %d (offset %#lx): ", entry, entry * pagesize ); +- perror( "mmap" ); +- } +- +- if (debug_level) +- fprintf( stderr, "esync: Mapping page %d at %p.\n", entry, addr ); +- +- if (__sync_val_compare_and_swap( &shm_addrs[entry], 0, addr )) +- munmap( addr, pagesize ); /* someone beat us to it */ +- } +- +- return (void *)((unsigned long)shm_addrs[entry] + offset); +-} +- +-struct semaphore +-{ +- int max; +- int count; +-}; +-C_ASSERT(sizeof(struct semaphore) == 8); +- +-struct mutex +-{ +- DWORD tid; +- int count; /* recursion count */ +-}; +-C_ASSERT(sizeof(struct mutex) == 8); +- +-struct event +-{ +- int signaled; +- int locked; +-}; +-C_ASSERT(sizeof(struct event) == 8); +- +-struct esync *create_esync( struct object *root, const struct unicode_str *name, +- unsigned int attr, int initval, int max, enum esync_type type, +- const struct security_descriptor *sd ) +-{ +-#ifdef HAVE_SYS_EVENTFD_H +- struct esync *esync; +- +- if ((esync = create_named_object( root, &esync_ops, name, attr, sd ))) +- { +- if (get_error() != STATUS_OBJECT_NAME_EXISTS) +- { +- int flags = EFD_CLOEXEC | EFD_NONBLOCK; +- +- if (type == ESYNC_SEMAPHORE) +- flags |= EFD_SEMAPHORE; +- +- /* initialize it if it didn't already exist */ +- esync->fd = eventfd( initval, flags ); +- if (esync->fd == -1) +- { +- perror( "eventfd" ); +- file_set_error(); +- release_object( esync ); +- return NULL; +- } +- esync->type = type; +- +- /* Use the fd as index, since that'll be unique across all +- * processes, but should hopefully end up also allowing reuse. */ +- esync->shm_idx = esync->fd + 1; /* we keep index 0 reserved */ +- while (esync->shm_idx * 8 >= shm_size) +- { +- /* Better expand the shm section. 
*/ +- shm_size += pagesize; +- if (ftruncate( shm_fd, shm_size ) == -1) +- { +- fprintf( stderr, "esync: couldn't expand %s to size %ld: ", +- shm_name, (long)shm_size ); +- perror( "ftruncate" ); +- } +- } +- +- /* Initialize the shared memory portion. We want to do this on the +- * server side to avoid a potential though unlikely race whereby +- * the same object is opened and used between the time it's created +- * and the time its shared memory portion is initialized. */ +- switch (type) +- { +- case ESYNC_SEMAPHORE: +- { +- struct semaphore *semaphore = get_shm( esync->shm_idx ); +- semaphore->max = max; +- semaphore->count = initval; +- break; +- } +- case ESYNC_AUTO_EVENT: +- case ESYNC_MANUAL_EVENT: +- { +- struct event *event = get_shm( esync->shm_idx ); +- event->signaled = initval ? 1 : 0; +- event->locked = 0; +- break; +- } +- case ESYNC_MUTEX: +- { +- struct mutex *mutex = get_shm( esync->shm_idx ); +- mutex->tid = initval ? 0 : current->id; +- mutex->count = initval ? 0 : 1; +- list_add_tail( &mutex_list, &esync->mutex_entry ); +- break; +- } +- default: +- assert( 0 ); +- } +- } +- else +- { +- /* validate the type */ +- if (!type_matches( type, esync->type )) +- { +- release_object( &esync->obj ); +- set_error( STATUS_OBJECT_TYPE_MISMATCH ); +- return NULL; +- } +- } +- } +- return esync; +-#else +- /* FIXME: Provide a fallback implementation using pipe(). */ +- set_error( STATUS_NOT_IMPLEMENTED ); +- return NULL; +-#endif +-} +- +-/* Create a file descriptor for an existing handle. +- * Caller must close the handle when it's done; it's not linked to an esync +- * server object in any way. */ +-int esync_create_fd( int initval, int flags ) +-{ +-#ifdef HAVE_SYS_EVENTFD_H +- int fd; +- +- fd = eventfd( initval, flags | EFD_CLOEXEC | EFD_NONBLOCK ); +- if (fd == -1) +- perror( "eventfd" ); +- +- return fd; +-#else +- return -1; +-#endif +-} +- +-/* Wake up a specific fd. */ +-void esync_wake_fd( int fd ) +-{ +- static const uint64_t value = 1; +- +- if (write( fd, &value, sizeof(value) ) == -1) +- perror( "esync: write" ); +-} +- +-/* Wake up a server-side esync object. */ +-void esync_wake_up( struct object *obj ) +-{ +- enum esync_type dummy; +- int fd; +- +- if (obj->ops->get_esync_fd) +- { +- fd = obj->ops->get_esync_fd( obj, &dummy ); +- esync_wake_fd( fd ); +- } +-} +- +-void esync_clear( int fd ) +-{ +- uint64_t value; +- +- /* we don't care about the return value */ +- read( fd, &value, sizeof(value) ); +-} +- +-static inline void small_pause(void) +-{ +-#ifdef __i386__ +- __asm__ __volatile__( "rep;nop" : : : "memory" ); +-#else +- __asm__ __volatile__( "" : : : "memory" ); +-#endif +-} +- +-/* Server-side event support. */ +-void esync_set_event( struct esync *esync ) +-{ +- static const uint64_t value = 1; +- struct event *event = get_shm( esync->shm_idx ); +- +- assert( esync->obj.ops == &esync_ops ); +- assert( event != NULL ); +- +- if (debug_level) +- fprintf( stderr, "esync_set_event() fd=%d\n", esync->fd ); +- +- if (esync->type == ESYNC_MANUAL_EVENT) +- { +- /* Acquire the spinlock. */ +- while (__sync_val_compare_and_swap( &event->locked, 0, 1 )) +- small_pause(); +- } +- +- if (!__atomic_exchange_n( &event->signaled, 1, __ATOMIC_SEQ_CST )) +- { +- if (write( esync->fd, &value, sizeof(value) ) == -1) +- perror( "esync: write" ); +- } +- +- if (esync->type == ESYNC_MANUAL_EVENT) +- { +- /* Release the spinlock. 
*/ +- event->locked = 0; +- } +-} +- +-void esync_reset_event( struct esync *esync ) +-{ +- static uint64_t value = 1; +- struct event *event = get_shm( esync->shm_idx ); +- +- assert( esync->obj.ops == &esync_ops ); +- assert( event != NULL ); +- +- if (debug_level) +- fprintf( stderr, "esync_reset_event() fd=%d\n", esync->fd ); +- +- if (esync->type == ESYNC_MANUAL_EVENT) +- { +- /* Acquire the spinlock. */ +- while (__sync_val_compare_and_swap( &event->locked, 0, 1 )) +- small_pause(); +- } +- +- /* Only bother signaling the fd if we weren't already signaled. */ +- if (__atomic_exchange_n( &event->signaled, 0, __ATOMIC_SEQ_CST )) +- { +- /* we don't care about the return value */ +- read( esync->fd, &value, sizeof(value) ); +- } +- +- if (esync->type == ESYNC_MANUAL_EVENT) +- { +- /* Release the spinlock. */ +- event->locked = 0; +- } +-} +- +-void esync_abandon_mutexes( struct thread *thread ) +-{ +- struct esync *esync; +- +- LIST_FOR_EACH_ENTRY( esync, &mutex_list, struct esync, mutex_entry ) +- { +- struct mutex *mutex = get_shm( esync->shm_idx ); +- +- if (mutex->tid == thread->id) +- { +- if (debug_level) +- fprintf( stderr, "esync_abandon_mutexes() fd=%d\n", esync->fd ); +- mutex->tid = ~0; +- mutex->count = 0; +- esync_wake_fd( esync->fd ); +- } +- } +-} +- +-DECL_HANDLER(create_esync) +-{ +- struct esync *esync; +- struct unicode_str name; +- struct object *root; +- const struct security_descriptor *sd; +- const struct object_attributes *objattr = get_req_object_attributes( &sd, &name, &root ); +- +- if (!do_esync()) +- { +- set_error( STATUS_NOT_IMPLEMENTED ); +- return; +- } +- +- if (!req->type) +- { +- set_error( STATUS_INVALID_PARAMETER ); +- return; +- } +- +- if (!objattr) return; +- +- if ((esync = create_esync( root, &name, objattr->attributes, req->initval, req->max, req->type, sd ))) +- { +- if (get_error() == STATUS_OBJECT_NAME_EXISTS) +- reply->handle = alloc_handle( current->process, esync, req->access, objattr->attributes ); +- else +- reply->handle = alloc_handle_no_access_check( current->process, esync, +- req->access, objattr->attributes ); +- +- reply->type = esync->type; +- reply->shm_idx = esync->shm_idx; +- send_client_fd( current->process, esync->fd, reply->handle ); +- release_object( esync ); +- } +- +- if (root) release_object( root ); +-} +- +-DECL_HANDLER(open_esync) +-{ +- struct unicode_str name = get_req_unicode_str(); +- +- reply->handle = open_object( current->process, req->rootdir, req->access, +- &esync_ops, &name, req->attributes ); +- +- /* send over the fd */ +- if (reply->handle) +- { +- struct esync *esync; +- +- if (!(esync = (struct esync *)get_handle_obj( current->process, reply->handle, +- 0, &esync_ops ))) +- return; +- +- if (!type_matches( req->type, esync->type )) +- { +- set_error( STATUS_OBJECT_TYPE_MISMATCH ); +- release_object( esync ); +- return; +- } +- +- reply->type = esync->type; +- reply->shm_idx = esync->shm_idx; +- +- send_client_fd( current->process, esync->fd, reply->handle ); +- release_object( esync ); +- } +-} +- +-/* Retrieve a file descriptor for an esync object which will be signaled by the +- * server. The client should only read from (i.e. wait on) this object. 
*/
+-DECL_HANDLER(get_esync_fd)
+-{
+- struct object *obj;
+- enum esync_type type;
+- int fd;
+-
+- if (!(obj = get_handle_obj( current->process, req->handle, SYNCHRONIZE, NULL )))
+- return;
+-
+- if (obj->ops->get_esync_fd)
+- {
+- fd = obj->ops->get_esync_fd( obj, &type );
+- reply->type = type;
+- if (obj->ops == &esync_ops)
+- {
+- struct esync *esync = (struct esync *)obj;
+- reply->shm_idx = esync->shm_idx;
+- }
+- else
+- reply->shm_idx = 0;
+- send_client_fd( current->process, fd, req->handle );
+- }
+- else
+- {
+- if (debug_level)
+- {
+- fprintf( stderr, "%04x: esync: can't wait on object: ", current->id );
+- obj->ops->dump( obj, 0 );
+- }
+- set_error( STATUS_NOT_IMPLEMENTED );
+- }
+-
+- release_object( obj );
+-}
+-
+-/* Return the fd used for waiting on user APCs. */
+-DECL_HANDLER(get_esync_apc_fd)
+-{
+- send_client_fd( current->process, current->esync_apc_fd, current->id );
+-}
+diff --git a/server/esync.h b/server/esync.h
+deleted file mode 100644
+index d39f4ef..0000000
+--- a/server/esync.h
++++ /dev/null
+@@ -1,35 +0,0 @@
+-/*
+- * eventfd-based synchronization objects
+- *
+- * Copyright (C) 2018 Zebediah Figura
+- *
+- * This library is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU Lesser General Public
+- * License as published by the Free Software Foundation; either
+- * version 2.1 of the License, or (at your option) any later version.
+- *
+- * This library is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * Lesser General Public License for more details.
+- *
+- * You should have received a copy of the GNU Lesser General Public
+- * License along with this library; if not, write to the Free Software
+- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+- */
+-
+-#include <stdbool.h>
+-
+-extern int do_esync(void);
+-void esync_init(void);
+-int esync_create_fd( int initval, int flags );
+-void esync_wake_fd( int fd );
+-void esync_wake_up( struct object *obj );
+-void esync_clear( int fd );
+-
+-struct esync;
+-
+-extern const struct object_ops esync_ops;
+-void esync_set_event( struct esync *esync );
+-void esync_reset_event( struct esync *esync );
+-void esync_abandon_mutexes( struct thread *thread );
+diff --git a/server/event.c b/server/event.c
+index f4ca3e4..b750a22 100644
+--- a/server/event.c
++++ b/server/event.c
+@@ -35,7 +35,6 @@
+ #include "thread.h"
+ #include "request.h"
+ #include "security.h"
+-#include "esync.h"
+
+ static const WCHAR event_name[] = {'E','v','e','n','t'};
+
+@@ -57,15 +56,15 @@ struct event
+ struct list kernel_object; /* list of kernel object pointers */
+ int manual_reset; /* is it a manual reset event? 
*/ + int signaled; /* event has been signaled */ +- int esync_fd; /* esync file descriptor */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + static void event_dump( struct object *obj, int verbose ); + static int event_signaled( struct object *obj, struct wait_queue_entry *entry ); + static void event_satisfied( struct object *obj, struct wait_queue_entry *entry ); +-static int event_get_esync_fd( struct object *obj, enum esync_type *type ); + static int event_signal( struct object *obj, unsigned int access); + static struct list *event_get_kernel_obj_list( struct object *obj ); ++static struct fast_sync *event_get_fast_sync( struct object *obj ); + static void event_destroy( struct object *obj ); + + static const struct object_ops event_ops = +@@ -76,7 +75,6 @@ static const struct object_ops event_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + event_signaled, /* signaled */ +- event_get_esync_fd, /* get_esync_fd */ + event_satisfied, /* satisfied */ + event_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -89,6 +87,7 @@ static const struct object_ops event_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + event_get_kernel_obj_list, /* get_kernel_obj_list */ ++ event_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + event_destroy /* destroy */ + }; +@@ -111,10 +110,13 @@ struct type_descr keyed_event_type = + struct keyed_event + { + struct object obj; /* object header */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + static void keyed_event_dump( struct object *obj, int verbose ); + static int keyed_event_signaled( struct object *obj, struct wait_queue_entry *entry ); ++static struct fast_sync *keyed_event_get_fast_sync( struct object *obj ); ++static void keyed_event_destroy( struct object *obj ); + + static const struct object_ops keyed_event_ops = + { +@@ -124,7 +126,6 @@ static const struct object_ops keyed_event_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + keyed_event_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -137,8 +138,9 @@ static const struct object_ops keyed_event_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ keyed_event_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ +- no_destroy /* destroy */ ++ keyed_event_destroy /* destroy */ + }; + + +@@ -156,9 +158,7 @@ struct event *create_event( struct object *root, const struct unicode_str *name, + list_init( &event->kernel_object ); + event->manual_reset = manual_reset; + event->signaled = initial_state; +- +- if (do_esync()) +- event->esync_fd = esync_create_fd( initial_state, 0 ); ++ event->fast_sync = NULL; + } + } + return event; +@@ -166,10 +166,6 @@ struct event *create_event( struct object *root, const struct unicode_str *name, + + struct event *get_event_obj( struct process *process, obj_handle_t handle, unsigned int access ) + { +- struct object *obj; +- if (do_esync() && (obj = get_handle_obj( process, handle, access, &esync_ops))) +- return (struct event *)obj; /* even though it's not an event */ +- + return (struct event *)get_handle_obj( process, handle, access, &event_ops ); + } + +@@ -183,28 +179,16 @@ static void pulse_event( struct event *event ) + + void set_event( struct event *event ) + { +- if (do_esync() && event->obj.ops == &esync_ops) +- { +- 
esync_set_event( (struct esync *)event ); +- return; +- } +- + event->signaled = 1; + /* wake up all waiters if manual reset, a single one otherwise */ + wake_up( &event->obj, !event->manual_reset ); ++ fast_set_event( event->fast_sync ); + } + + void reset_event( struct event *event ) + { +- if (do_esync() && event->obj.ops == &esync_ops) +- { +- esync_reset_event( (struct esync *)event ); +- return; +- } + event->signaled = 0; +- +- if (do_esync()) +- esync_clear( event->esync_fd ); ++ fast_reset_event( event->fast_sync ); + } + + static void event_dump( struct object *obj, int verbose ) +@@ -222,13 +206,6 @@ static int event_signaled( struct object *obj, struct wait_queue_entry *entry ) + return event->signaled; + } + +-static int event_get_esync_fd( struct object *obj, enum esync_type *type ) +-{ +- struct event *event = (struct event *)obj; +- *type = event->manual_reset ? ESYNC_MANUAL_SERVER : ESYNC_AUTO_SERVER; +- return event->esync_fd; +-} +- + static void event_satisfied( struct object *obj, struct wait_queue_entry *entry ) + { + struct event *event = (struct event *)obj; +@@ -257,12 +234,24 @@ static struct list *event_get_kernel_obj_list( struct object *obj ) + return &event->kernel_object; + } + ++static struct fast_sync *event_get_fast_sync( struct object *obj ) ++{ ++ struct event *event = (struct event *)obj; ++ ++ if (!event->fast_sync) ++ { ++ enum fast_sync_type type = event->manual_reset ? FAST_SYNC_MANUAL_EVENT : FAST_SYNC_AUTO_EVENT; ++ event->fast_sync = fast_create_event( type, event->signaled ); ++ } ++ if (event->fast_sync) grab_object( event->fast_sync ); ++ return event->fast_sync; ++} ++ + static void event_destroy( struct object *obj ) + { + struct event *event = (struct event *)obj; + +- if (do_esync()) +- close( event->esync_fd ); ++ if (event->fast_sync) release_object( event->fast_sync ); + } + + struct keyed_event *create_keyed_event( struct object *root, const struct unicode_str *name, +@@ -275,6 +264,7 @@ struct keyed_event *create_keyed_event( struct object *root, const struct unicod + if (get_error() != STATUS_OBJECT_NAME_EXISTS) + { + /* initialize it if it didn't already exist */ ++ event->fast_sync = NULL; + } + } + return event; +@@ -318,6 +308,23 @@ static int keyed_event_signaled( struct object *obj, struct wait_queue_entry *en + return 0; + } + ++static struct fast_sync *keyed_event_get_fast_sync( struct object *obj ) ++{ ++ struct keyed_event *event = (struct keyed_event *)obj; ++ ++ if (!event->fast_sync) ++ event->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, 1 ); ++ if (event->fast_sync) grab_object( event->fast_sync ); ++ return event->fast_sync; ++} ++ ++static void keyed_event_destroy( struct object *obj ) ++{ ++ struct keyed_event *event = (struct keyed_event *)obj; ++ ++ if (event->fast_sync) release_object( event->fast_sync ); ++} ++ + /* create an event */ + DECL_HANDLER(create_event) + { +diff --git a/server/fast_sync.c b/server/fast_sync.c +new file mode 100644 +index 0000000..fed6eb9 +--- /dev/null ++++ b/server/fast_sync.c +@@ -0,0 +1,434 @@ ++/* ++ * Fast synchronization primitives ++ * ++ * Copyright (C) 2021-2022 Elizabeth Figura for CodeWeavers ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. 
++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA ++ */ ++ ++#include "config.h" ++ ++#include ++#include ++#include ++#include ++ ++#include "ntstatus.h" ++#define WIN32_NO_STATUS ++#include "winternl.h" ++ ++#include "file.h" ++#include "handle.h" ++#include "request.h" ++#include "thread.h" ++ ++#ifdef HAVE_LINUX_NTSYNC_H ++ ++#include ++#include ++#include ++#include ++#include ++ ++struct linux_device ++{ ++ struct object obj; /* object header */ ++ struct fd *fd; /* fd for unix fd */ ++}; ++ ++static struct linux_device *linux_device_object; ++ ++static void linux_device_dump( struct object *obj, int verbose ); ++static struct fd *linux_device_get_fd( struct object *obj ); ++static void linux_device_destroy( struct object *obj ); ++static enum server_fd_type fast_sync_get_fd_type( struct fd *fd ); ++ ++static const struct object_ops linux_device_ops = ++{ ++ sizeof(struct linux_device), /* size */ ++ &no_type, /* type */ ++ linux_device_dump, /* dump */ ++ no_add_queue, /* add_queue */ ++ NULL, /* remove_queue */ ++ NULL, /* signaled */ ++ NULL, /* satisfied */ ++ no_signal, /* signal */ ++ linux_device_get_fd, /* get_fd */ ++ default_map_access, /* map_access */ ++ default_get_sd, /* get_sd */ ++ default_set_sd, /* set_sd */ ++ no_get_full_name, /* get_full_name */ ++ no_lookup_name, /* lookup_name */ ++ no_link_name, /* link_name */ ++ NULL, /* unlink_name */ ++ no_open_file, /* open_file */ ++ no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ ++ no_close_handle, /* close_handle */ ++ linux_device_destroy /* destroy */ ++}; ++ ++static const struct fd_ops fast_sync_fd_ops = ++{ ++ default_fd_get_poll_events, /* get_poll_events */ ++ default_poll_event, /* poll_event */ ++ fast_sync_get_fd_type, /* get_fd_type */ ++ no_fd_read, /* read */ ++ no_fd_write, /* write */ ++ no_fd_flush, /* flush */ ++ no_fd_get_file_info, /* get_file_info */ ++ no_fd_get_volume_info, /* get_volume_info */ ++ no_fd_ioctl, /* ioctl */ ++ default_fd_cancel_async, /* cancel_async */ ++ no_fd_queue_async, /* queue_async */ ++ default_fd_reselect_async /* reselect_async */ ++}; ++ ++static void linux_device_dump( struct object *obj, int verbose ) ++{ ++ struct linux_device *device = (struct linux_device *)obj; ++ assert( obj->ops == &linux_device_ops ); ++ fprintf( stderr, "Fast synchronization device fd=%p\n", device->fd ); ++} ++ ++static struct fd *linux_device_get_fd( struct object *obj ) ++{ ++ struct linux_device *device = (struct linux_device *)obj; ++ return (struct fd *)grab_object( device->fd ); ++} ++ ++static void linux_device_destroy( struct object *obj ) ++{ ++ struct linux_device *device = (struct linux_device *)obj; ++ assert( obj->ops == &linux_device_ops ); ++ if (device->fd) release_object( device->fd ); ++ linux_device_object = NULL; ++} ++ ++static enum server_fd_type fast_sync_get_fd_type( struct fd *fd ) ++{ ++ return FD_TYPE_FILE; ++} ++ ++static struct linux_device *get_linux_device(void) ++{ ++ struct linux_device *device; ++ static int initialized; ++ int unix_fd; ++ ++ if (initialized) ++ { ++ if 
(linux_device_object) ++ grab_object( linux_device_object ); ++ else ++ set_error( STATUS_NOT_IMPLEMENTED ); ++ return linux_device_object; ++ } ++ ++ if (getenv( "WINE_DISABLE_FAST_SYNC" ) && atoi( getenv( "WINE_DISABLE_FAST_SYNC" ) )) ++ { ++ static int once; ++ set_error( STATUS_NOT_IMPLEMENTED ); ++ if (!once++) fprintf(stderr, "ntsync is explicitly disabled.\n"); ++ initialized = 1; ++ return NULL; ++ } ++ ++ unix_fd = open( "/dev/ntsync", O_CLOEXEC | O_RDONLY ); ++ if (unix_fd == -1) ++ { ++ static int once; ++ file_set_error(); ++ if (!once++) fprintf(stderr, "Cannot open /dev/ntsync: %s\n", strerror(errno)); ++ initialized = 1; ++ return NULL; ++ } ++ ++ if (!(device = alloc_object( &linux_device_ops ))) ++ { ++ close( unix_fd ); ++ set_error( STATUS_NO_MEMORY ); ++ initialized = 1; ++ return NULL; ++ } ++ ++ if (!(device->fd = create_anonymous_fd( &fast_sync_fd_ops, unix_fd, &device->obj, 0 ))) ++ { ++ release_object( device ); ++ initialized = 1; ++ return NULL; ++ } ++ ++ fprintf( stderr, "wine: using fast synchronization.\n" ); ++ linux_device_object = device; ++ initialized = 1; ++ return device; ++} ++ ++struct fast_sync ++{ ++ struct object obj; ++ enum fast_sync_type type; ++ struct fd *fd; ++}; ++ ++static void linux_obj_dump( struct object *obj, int verbose ); ++static void linux_obj_destroy( struct object *obj ); ++static struct fd *linux_obj_get_fd( struct object *obj ); ++ ++static const struct object_ops linux_obj_ops = ++{ ++ sizeof(struct fast_sync), /* size */ ++ &no_type, /* type */ ++ linux_obj_dump, /* dump */ ++ no_add_queue, /* add_queue */ ++ NULL, /* remove_queue */ ++ NULL, /* signaled */ ++ NULL, /* satisfied */ ++ no_signal, /* signal */ ++ linux_obj_get_fd, /* get_fd */ ++ default_map_access, /* map_access */ ++ default_get_sd, /* get_sd */ ++ default_set_sd, /* set_sd */ ++ no_get_full_name, /* get_full_name */ ++ no_lookup_name, /* lookup_name */ ++ no_link_name, /* link_name */ ++ NULL, /* unlink_name */ ++ no_open_file, /* open_file */ ++ no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ ++ no_close_handle, /* close_handle */ ++ linux_obj_destroy /* destroy */ ++}; ++ ++static void linux_obj_dump( struct object *obj, int verbose ) ++{ ++ struct fast_sync *fast_sync = (struct fast_sync *)obj; ++ assert( obj->ops == &linux_obj_ops ); ++ fprintf( stderr, "Fast synchronization object type=%u fd=%p\n", fast_sync->type, fast_sync->fd ); ++} ++ ++static void linux_obj_destroy( struct object *obj ) ++{ ++ struct fast_sync *fast_sync = (struct fast_sync *)obj; ++ assert( obj->ops == &linux_obj_ops ); ++ if (fast_sync->fd) release_object( fast_sync->fd ); ++} ++ ++static struct fd *linux_obj_get_fd( struct object *obj ) ++{ ++ struct fast_sync *fast_sync = (struct fast_sync *)obj; ++ assert( obj->ops == &linux_obj_ops ); ++ return (struct fd *)grab_object( fast_sync->fd ); ++} ++ ++static struct fast_sync *create_fast_sync( enum fast_sync_type type, int unix_fd ) ++{ ++ struct fast_sync *fast_sync; ++ ++ if (!(fast_sync = alloc_object( &linux_obj_ops ))) ++ { ++ close( unix_fd ); ++ return NULL; ++ } ++ ++ fast_sync->type = type; ++ ++ if (!(fast_sync->fd = create_anonymous_fd( &fast_sync_fd_ops, unix_fd, &fast_sync->obj, 0 ))) ++ { ++ release_object( fast_sync ); ++ return NULL; ++ } ++ ++ return fast_sync; ++} ++ ++struct fast_sync *fast_create_event( enum fast_sync_type type, int signaled ) ++{ ++ struct ntsync_event_args args = {0}; ++ struct linux_device *device; ++ ++ if (!(device = get_linux_device())) return NULL; 
++ ++ args.signaled = signaled; ++ switch (type) ++ { ++ case FAST_SYNC_AUTO_EVENT: ++ case FAST_SYNC_AUTO_SERVER: ++ args.manual = 0; ++ break; ++ ++ case FAST_SYNC_MANUAL_EVENT: ++ case FAST_SYNC_MANUAL_SERVER: ++ case FAST_SYNC_QUEUE: ++ args.manual = 1; ++ break; ++ ++ case FAST_SYNC_MUTEX: ++ case FAST_SYNC_SEMAPHORE: ++ assert(0); ++ break; ++ } ++ if (ioctl( get_unix_fd( device->fd ), NTSYNC_IOC_CREATE_EVENT, &args ) < 0) ++ { ++ file_set_error(); ++ release_object( device ); ++ return NULL; ++ } ++ release_object( device ); ++ ++ return create_fast_sync( type, args.event ); ++} ++ ++struct fast_sync *fast_create_semaphore( unsigned int count, unsigned int max ) ++{ ++ struct ntsync_sem_args args = {0}; ++ struct linux_device *device; ++ ++ if (!(device = get_linux_device())) return NULL; ++ ++ args.count = count; ++ args.max = max; ++ if (ioctl( get_unix_fd( device->fd ), NTSYNC_IOC_CREATE_SEM, &args ) < 0) ++ { ++ file_set_error(); ++ release_object( device ); ++ return NULL; ++ } ++ ++ release_object( device ); ++ ++ return create_fast_sync( FAST_SYNC_SEMAPHORE, args.sem ); ++} ++ ++struct fast_sync *fast_create_mutex( thread_id_t owner, unsigned int count ) ++{ ++ struct ntsync_mutex_args args = {0}; ++ struct linux_device *device; ++ ++ if (!(device = get_linux_device())) return NULL; ++ ++ args.owner = owner; ++ args.count = count; ++ if (ioctl( get_unix_fd( device->fd ), NTSYNC_IOC_CREATE_MUTEX, &args ) < 0) ++ { ++ file_set_error(); ++ release_object( device ); ++ return NULL; ++ } ++ ++ release_object( device ); ++ ++ return create_fast_sync( FAST_SYNC_MUTEX, args.mutex ); ++} ++ ++void fast_set_event( struct fast_sync *fast_sync ) ++{ ++ __u32 count; ++ ++ if (!fast_sync) return; ++ ++ if (debug_level) fprintf( stderr, "fast_set_event %p\n", fast_sync->fd ); ++ ++ ioctl( get_unix_fd( fast_sync->fd ), NTSYNC_IOC_EVENT_SET, &count ); ++} ++ ++void fast_reset_event( struct fast_sync *fast_sync ) ++{ ++ __u32 count; ++ ++ if (!fast_sync) return; ++ ++ if (debug_level) fprintf( stderr, "fast_reset_event %p\n", fast_sync->fd ); ++ ++ ioctl( get_unix_fd( fast_sync->fd ), NTSYNC_IOC_EVENT_RESET, &count ); ++} ++ ++void fast_abandon_mutex( thread_id_t tid, struct fast_sync *fast_sync ) ++{ ++ ioctl( get_unix_fd( fast_sync->fd ), NTSYNC_IOC_MUTEX_KILL, &tid ); ++} ++ ++#else ++ ++struct fast_sync *fast_create_event( enum fast_sync_type type, int signaled ) ++{ ++ set_error( STATUS_NOT_IMPLEMENTED ); ++ return NULL; ++} ++ ++struct fast_sync *fast_create_semaphore( unsigned int count, unsigned int max ) ++{ ++ set_error( STATUS_NOT_IMPLEMENTED ); ++ return NULL; ++} ++ ++struct fast_sync *fast_create_mutex( thread_id_t owner, unsigned int count ) ++{ ++ set_error( STATUS_NOT_IMPLEMENTED ); ++ return NULL; ++} ++ ++void fast_set_event( struct fast_sync *fast_sync ) ++{ ++} ++ ++void fast_reset_event( struct fast_sync *obj ) ++{ ++} ++ ++void fast_abandon_mutex( thread_id_t tid, struct fast_sync *fast_sync ) ++{ ++} ++ ++#endif ++ ++DECL_HANDLER(get_linux_sync_device) ++{ ++#ifdef HAVE_LINUX_NTSYNC_H ++ struct linux_device *device; ++ ++ if ((device = get_linux_device())) ++ { ++ reply->handle = alloc_handle( current->process, device, 0, 0 ); ++ release_object( device ); ++ } ++#else ++ set_error( STATUS_NOT_IMPLEMENTED ); ++#endif ++} ++ ++DECL_HANDLER(get_linux_sync_obj) ++{ ++#ifdef HAVE_LINUX_NTSYNC_H ++ struct object *obj; ++ ++ if ((obj = get_handle_obj( current->process, req->handle, 0, NULL ))) ++ { ++ struct fast_sync *fast_sync; ++ ++ if ((fast_sync = obj->ops->get_fast_sync(
obj ))) ++ { ++ reply->handle = alloc_handle( current->process, fast_sync, 0, 0 ); ++ reply->type = fast_sync->type; ++ reply->access = get_handle_access( current->process, req->handle ); ++ release_object( fast_sync ); ++ } ++ release_object( obj ); ++ } ++#else ++ set_error( STATUS_NOT_IMPLEMENTED ); ++#endif ++} +diff --git a/server/fd.c b/server/fd.c +index a6782b0..f00e22d 100644 +--- a/server/fd.c ++++ b/server/fd.c +@@ -97,7 +97,6 @@ + #include "handle.h" + #include "process.h" + #include "request.h" +-#include "esync.h" + + #include "winternl.h" + #include "winioctl.h" +@@ -162,7 +161,7 @@ struct fd + struct completion *completion; /* completion object attached to this fd */ + apc_param_t comp_key; /* completion key to set in completion events */ + unsigned int comp_flags; /* completion flags */ +- int esync_fd; /* esync file descriptor */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + static void fd_dump( struct object *obj, int verbose ); +@@ -176,7 +175,6 @@ static const struct object_ops fd_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -189,6 +187,7 @@ static const struct object_ops fd_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + fd_destroy /* destroy */ + }; +@@ -218,7 +217,6 @@ static const struct object_ops device_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -231,6 +229,7 @@ static const struct object_ops device_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + device_destroy /* destroy */ + }; +@@ -259,7 +258,6 @@ static const struct object_ops inode_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -272,6 +270,7 @@ static const struct object_ops inode_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + inode_destroy /* destroy */ + }; +@@ -302,7 +301,6 @@ static const struct object_ops file_lock_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + file_lock_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -315,6 +313,7 @@ static const struct object_ops file_lock_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + no_destroy /* destroy */ + }; +@@ -1651,9 +1650,7 @@ static void fd_destroy( struct object *obj ) + if (fd->unix_fd != -1) close( fd->unix_fd ); + free( fd->unix_name ); + } +- +- if (do_esync()) +- close( fd->esync_fd ); ++ if (fd->fast_sync) release_object( fd->fast_sync ); + } + + /* check if the desired access is possible without violating */ +@@ -1772,16 +1769,13 @@ static struct fd *alloc_fd_object(void) + fd->poll_index 
= -1; + fd->completion = NULL; + fd->comp_flags = 0; +- fd->esync_fd = -1; ++ fd->fast_sync = NULL; + init_async_queue( &fd->read_q ); + init_async_queue( &fd->write_q ); + init_async_queue( &fd->wait_q ); + list_init( &fd->inode_entry ); + list_init( &fd->locks ); + +- if (do_esync()) +- fd->esync_fd = esync_create_fd( 1, 0 ); +- + if ((fd->poll_index = add_poll_user( fd )) == -1) + { + release_object( fd ); +@@ -1816,16 +1810,13 @@ struct fd *alloc_pseudo_fd( const struct fd_ops *fd_user_ops, struct object *use + fd->poll_index = -1; + fd->completion = NULL; + fd->comp_flags = 0; ++ fd->fast_sync = NULL; + fd->no_fd_status = STATUS_BAD_DEVICE_TYPE; +- fd->esync_fd = -1; + init_async_queue( &fd->read_q ); + init_async_queue( &fd->write_q ); + init_async_queue( &fd->wait_q ); + list_init( &fd->inode_entry ); + list_init( &fd->locks ); +- +- if (do_esync()) +- fd->esync_fd = esync_create_fd( 0, 0 ); + return fd; + } + +@@ -2282,10 +2273,15 @@ void set_fd_signaled( struct fd *fd, int signaled ) + { + if (fd->comp_flags & FILE_SKIP_SET_EVENT_ON_HANDLE) return; + fd->signaled = signaled; +- if (signaled) wake_up( fd->user, 0 ); +- +- if (do_esync() && !signaled) +- esync_clear( fd->esync_fd ); ++ if (signaled) ++ { ++ wake_up( fd->user, 0 ); ++ fast_set_event( fd->fast_sync ); ++ } ++ else ++ { ++ fast_reset_event( fd->fast_sync ); ++ } + } + + /* check if events are pending and if yes return which one(s) */ +@@ -2311,12 +2307,16 @@ int default_fd_signaled( struct object *obj, struct wait_queue_entry *entry ) + return ret; + } + +-int default_fd_get_esync_fd( struct object *obj, enum esync_type *type ) ++struct fast_sync *default_fd_get_fast_sync( struct object *obj ) + { + struct fd *fd = get_obj_fd( obj ); +- int ret = fd->esync_fd; +- *type = ESYNC_MANUAL_SERVER; ++ struct fast_sync *ret; ++ ++ if (!fd->fast_sync) ++ fd->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, fd->signaled ); ++ ret = fd->fast_sync; + release_object( fd ); ++ if (ret) grab_object( ret ); + return ret; + } + +diff --git a/server/file.c b/server/file.c +index 828a21c..c6bdbef 100644 +--- a/server/file.c ++++ b/server/file.c +@@ -123,7 +123,6 @@ static const struct object_ops file_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + file_get_fd, /* get_fd */ +@@ -136,6 +135,7 @@ static const struct object_ops file_ops = + NULL, /* unlink_name */ + file_open_file, /* open_file */ + file_get_kernel_obj_list, /* get_kernel_obj_list */ ++ default_fd_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + file_destroy /* destroy */ + }; +diff --git a/server/file.h b/server/file.h +index abad355..6b4fa51 100644 +--- a/server/file.h ++++ b/server/file.h +@@ -108,7 +108,7 @@ extern char *dup_fd_name( struct fd *root, const char *name ) __WINE_DEALLOC(fre + extern void get_nt_name( struct fd *fd, struct unicode_str *name ); + + extern int default_fd_signaled( struct object *obj, struct wait_queue_entry *entry ); +-extern int default_fd_get_esync_fd( struct object *obj, enum esync_type *type ); ++extern struct fast_sync *default_fd_get_fast_sync( struct object *obj ); + extern int default_fd_get_poll_events( struct fd *fd ); + extern void default_poll_event( struct fd *fd, int event ); + extern void fd_cancel_async( struct fd *fd, struct async *async ); +diff --git a/server/handle.c b/server/handle.c +index cf6afe4..e07f32c 100644 +--- a/server/handle.c ++++ 
b/server/handle.c +@@ -126,7 +126,6 @@ static const struct object_ops handle_table_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -139,6 +138,7 @@ static const struct object_ops handle_table_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + handle_table_destroy /* destroy */ + }; +diff --git a/server/hook.c b/server/hook.c +index da351d6..5a00699 100644 +--- a/server/hook.c ++++ b/server/hook.c +@@ -80,7 +80,6 @@ static const struct object_ops hook_table_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -93,6 +92,7 @@ static const struct object_ops hook_table_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + hook_table_destroy /* destroy */ + }; +diff --git a/server/mailslot.c b/server/mailslot.c +index 4cf9b73..d9807b4 100644 +--- a/server/mailslot.c ++++ b/server/mailslot.c +@@ -74,7 +74,6 @@ static const struct object_ops mailslot_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + mailslot_get_fd, /* get_fd */ +@@ -87,6 +86,7 @@ static const struct object_ops mailslot_ops = + default_unlink_name, /* unlink_name */ + mailslot_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ default_fd_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + mailslot_destroy /* destroy */ + }; +@@ -134,7 +134,6 @@ static const struct object_ops mail_writer_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + mail_writer_get_fd, /* get_fd */ +@@ -147,6 +146,7 @@ static const struct object_ops mail_writer_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + mail_writer_destroy /* destroy */ + }; +@@ -198,7 +198,6 @@ static const struct object_ops mailslot_device_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -211,6 +210,7 @@ static const struct object_ops mailslot_device_ops = + default_unlink_name, /* unlink_name */ + mailslot_device_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + mailslot_device_destroy /* destroy */ + }; +@@ -229,7 +229,6 @@ static const struct object_ops mailslot_device_file_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + mailslot_device_file_get_fd, /* get_fd */ +@@ -242,6 +241,7 @@ static const struct object_ops mailslot_device_file_ops = + NULL, /* unlink_name */ + 
no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ default_fd_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + mailslot_device_file_destroy /* destroy */ + }; +diff --git a/server/main.c b/server/main.c +index 5062d09..ddda5f4 100644 +--- a/server/main.c ++++ b/server/main.c +@@ -34,7 +34,6 @@ + #include "thread.h" + #include "request.h" + #include "unicode.h" +-#include "esync.h" + + /* command-line options */ + int debug_level = 0; +@@ -230,9 +229,6 @@ int main( int argc, char *argv[] ) + sock_init(); + open_master_socket(); + +- if (do_esync()) +- esync_init(); +- + if (debug_level) fprintf( stderr, "wineserver: starting (pid=%ld)\n", (long) getpid() ); + set_current_time(); + init_scheduler(); +diff --git a/server/mapping.c b/server/mapping.c +index 88de806..c3cba90 100644 +--- a/server/mapping.c ++++ b/server/mapping.c +@@ -67,7 +67,6 @@ static const struct object_ops ranges_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -80,6 +79,7 @@ static const struct object_ops ranges_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + ranges_destroy /* destroy */ + }; +@@ -104,7 +104,6 @@ static const struct object_ops shared_map_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -117,6 +116,7 @@ static const struct object_ops shared_map_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + shared_map_destroy /* destroy */ + }; +@@ -178,7 +178,6 @@ static const struct object_ops mapping_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + mapping_get_fd, /* get_fd */ +@@ -191,6 +190,7 @@ static const struct object_ops mapping_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + mapping_destroy /* destroy */ + }; +diff --git a/server/mutex.c b/server/mutex.c +index 4785a83..167c236 100644 +--- a/server/mutex.c ++++ b/server/mutex.c +@@ -38,6 +38,8 @@ + + static const WCHAR mutex_name[] = {'M','u','t','a','n','t'}; + ++static struct list fast_mutexes = LIST_INIT(fast_mutexes); ++ + struct type_descr mutex_type = + { + { mutex_name, sizeof(mutex_name) }, /* name */ +@@ -57,6 +59,8 @@ struct mutex + unsigned int count; /* recursion count */ + int abandoned; /* has it been abandoned? 
*/ + struct list entry; /* entry in owner thread mutex list */ ++ struct list fast_mutexes_entry; /* entry in fast_mutexes list */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + static void mutex_dump( struct object *obj, int verbose ); +@@ -64,6 +68,7 @@ static int mutex_signaled( struct object *obj, struct wait_queue_entry *entry ); + static void mutex_satisfied( struct object *obj, struct wait_queue_entry *entry ); + static void mutex_destroy( struct object *obj ); + static int mutex_signal( struct object *obj, unsigned int access ); ++static struct fast_sync *mutex_get_fast_sync( struct object *obj ); + + static const struct object_ops mutex_ops = + { +@@ -73,7 +78,6 @@ static const struct object_ops mutex_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + mutex_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + mutex_satisfied, /* satisfied */ + mutex_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -86,6 +90,7 @@ static const struct object_ops mutex_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ mutex_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + mutex_destroy /* destroy */ + }; +@@ -128,6 +133,7 @@ static struct mutex *create_mutex( struct object *root, const struct unicode_str + mutex->owner = NULL; + mutex->abandoned = 0; + if (owned) do_grab( mutex, current ); ++ mutex->fast_sync = NULL; + } + } + return mutex; +@@ -135,16 +141,22 @@ static struct mutex *create_mutex( struct object *root, const struct unicode_str + + void abandon_mutexes( struct thread *thread ) + { ++ struct mutex *mutex; + struct list *ptr; + + while ((ptr = list_head( &thread->mutex_list )) != NULL) + { +- struct mutex *mutex = LIST_ENTRY( ptr, struct mutex, entry ); ++ mutex = LIST_ENTRY( ptr, struct mutex, entry ); + assert( mutex->owner == thread ); + mutex->count = 0; + mutex->abandoned = 1; + do_release( mutex ); + } ++ ++ LIST_FOR_EACH_ENTRY(mutex, &fast_mutexes, struct mutex, fast_mutexes_entry) ++ { ++ fast_abandon_mutex( thread->id, mutex->fast_sync ); ++ } + } + + static void mutex_dump( struct object *obj, int verbose ) +@@ -190,14 +202,34 @@ static int mutex_signal( struct object *obj, unsigned int access ) + return 1; + } + ++static struct fast_sync *mutex_get_fast_sync( struct object *obj ) ++{ ++ struct mutex *mutex = (struct mutex *)obj; ++ ++ if (!mutex->fast_sync) ++ { ++ mutex->fast_sync = fast_create_mutex( mutex->owner ? 
mutex->owner->id : 0, mutex->count ); ++ if (mutex->fast_sync) list_add_tail( &fast_mutexes, &mutex->fast_mutexes_entry ); ++ } ++ if (mutex->fast_sync) grab_object( mutex->fast_sync ); ++ return mutex->fast_sync; ++} ++ + static void mutex_destroy( struct object *obj ) + { + struct mutex *mutex = (struct mutex *)obj; + assert( obj->ops == &mutex_ops ); + +- if (!mutex->count) return; +- mutex->count = 0; +- do_release( mutex ); ++ if (mutex->count) ++ { ++ mutex->count = 0; ++ do_release( mutex ); ++ } ++ if (mutex->fast_sync) ++ { ++ release_object( mutex->fast_sync ); ++ list_remove( &mutex->fast_mutexes_entry ); ++ } + } + + /* create a mutex */ +diff --git a/server/named_pipe.c b/server/named_pipe.c +index a90ec60..6d8cb3e 100644 +--- a/server/named_pipe.c ++++ b/server/named_pipe.c +@@ -119,7 +119,6 @@ static const struct object_ops named_pipe_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -132,6 +131,7 @@ static const struct object_ops named_pipe_ops = + default_unlink_name, /* unlink_name */ + named_pipe_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + named_pipe_destroy /* destroy */ + }; +@@ -168,7 +168,6 @@ static const struct object_ops pipe_server_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ +- default_fd_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + pipe_end_get_fd, /* get_fd */ +@@ -181,6 +180,7 @@ static const struct object_ops pipe_server_ops = + NULL, /* unlink_name */ + pipe_server_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ default_fd_get_fast_sync, /* get_fast_sync */ + async_close_obj_handle, /* close_handle */ + pipe_server_destroy /* destroy */ + }; +@@ -213,7 +213,6 @@ static const struct object_ops pipe_client_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ +- default_fd_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + pipe_end_get_fd, /* get_fd */ +@@ -226,6 +225,7 @@ static const struct object_ops pipe_client_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ default_fd_get_fast_sync, /* get_fast_sync */ + async_close_obj_handle, /* close_handle */ + pipe_end_destroy /* destroy */ + }; +@@ -261,7 +261,6 @@ static const struct object_ops named_pipe_device_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -274,6 +273,7 @@ static const struct object_ops named_pipe_device_ops = + default_unlink_name, /* unlink_name */ + named_pipe_device_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + named_pipe_device_destroy /* destroy */ + }; +@@ -293,7 +293,6 @@ static const struct object_ops named_pipe_device_file_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + named_pipe_device_file_get_fd, /* get_fd */ +@@ -306,6 
+305,7 @@ static const struct object_ops named_pipe_device_file_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ default_fd_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + named_pipe_device_file_destroy /* destroy */ + }; +diff --git a/server/object.c b/server/object.c +index 29f1ea9..33fc18c 100644 +--- a/server/object.c ++++ b/server/object.c +@@ -538,6 +538,12 @@ struct fd *no_get_fd( struct object *obj ) + return NULL; + } + ++struct fast_sync *no_get_fast_sync( struct object *obj ) ++{ ++ set_error( STATUS_OBJECT_TYPE_MISMATCH ); ++ return NULL; ++} ++ + unsigned int default_map_access( struct object *obj, unsigned int access ) + { + return map_access( access, &obj->ops->type->mapping ); +diff --git a/server/object.h b/server/object.h +index 6f4bca8..f028a8e 100644 +--- a/server/object.h ++++ b/server/object.h +@@ -42,6 +42,7 @@ struct async; + struct async_queue; + struct winstation; + struct object_type; ++struct fast_sync; + + + struct unicode_str +@@ -78,8 +79,6 @@ struct object_ops + void (*remove_queue)(struct object *,struct wait_queue_entry *); + /* is object signaled? */ + int (*signaled)(struct object *,struct wait_queue_entry *); +- /* return the esync fd for this object */ +- int (*get_esync_fd)(struct object *, enum esync_type *type); + /* wait satisfied */ + void (*satisfied)(struct object *,struct wait_queue_entry *); + /* signal an object */ +@@ -105,6 +104,8 @@ struct object_ops + unsigned int options); + /* return list of kernel objects */ + struct list *(*get_kernel_obj_list)(struct object *); ++ /* get a client-waitable fast-synchronization handle to this object */ ++ struct fast_sync *(*get_fast_sync)(struct object *); + /* close a handle to this object */ + int (*close_handle)(struct object *,struct process *,obj_handle_t); + /* destroy on refcount == 0 */ +@@ -224,6 +225,17 @@ extern void reset_event( struct event *event ); + + extern void abandon_mutexes( struct thread *thread ); + ++/* fast-synchronization functions */ ++ ++extern struct fast_sync *fast_create_event( enum fast_sync_type type, int signaled ); ++extern struct fast_sync *fast_create_semaphore( unsigned int count, unsigned int max ); ++extern struct fast_sync *fast_create_mutex( thread_id_t owner, unsigned int count ); ++extern void fast_set_event( struct fast_sync *obj ); ++extern void fast_reset_event( struct fast_sync *obj ); ++extern void fast_abandon_mutex( thread_id_t tid, struct fast_sync *fast_sync ); ++ ++extern struct fast_sync *no_get_fast_sync( struct object *obj ); ++ + /* serial functions */ + + int get_serial_async_timeout(struct object *obj, int type, int count); +diff --git a/server/process.c b/server/process.c +index 6d66a7c..34451c1 100644 +--- a/server/process.c ++++ b/server/process.c +@@ -63,7 +63,6 @@ + #include "request.h" + #include "user.h" + #include "security.h" +-#include "esync.h" + + /* process object */ + +@@ -95,8 +94,8 @@ static unsigned int process_map_access( struct object *obj, unsigned int access + static struct security_descriptor *process_get_sd( struct object *obj ); + static void process_poll_event( struct fd *fd, int event ); + static struct list *process_get_kernel_obj_list( struct object *obj ); ++static struct fast_sync *process_get_fast_sync( struct object *obj ); + static void process_destroy( struct object *obj ); +-static int process_get_esync_fd( struct object *obj, enum esync_type *type ); + static void terminate_process( struct process *process, struct 
thread *skip, int exit_code ); + + static const struct object_ops process_ops = +@@ -107,7 +106,6 @@ static const struct object_ops process_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + process_signaled, /* signaled */ +- process_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -120,6 +118,7 @@ static const struct object_ops process_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + process_get_kernel_obj_list, /* get_kernel_obj_list */ ++ process_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + process_destroy /* destroy */ + }; +@@ -159,7 +158,6 @@ static const struct object_ops startup_info_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + startup_info_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -172,6 +170,7 @@ static const struct object_ops startup_info_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + startup_info_destroy /* destroy */ + }; +@@ -194,6 +193,7 @@ struct type_descr job_type = + + static void job_dump( struct object *obj, int verbose ); + static int job_signaled( struct object *obj, struct wait_queue_entry *entry ); ++static struct fast_sync *job_get_fast_sync( struct object *obj ); + static int job_close_handle( struct object *obj, struct process *process, obj_handle_t handle ); + static void job_destroy( struct object *obj ); + +@@ -211,6 +211,7 @@ struct job + struct job *parent; + struct list parent_job_entry; /* list entry for parent job */ + struct list child_job_list; /* list of child jobs */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + static const struct object_ops job_ops = +@@ -221,7 +222,6 @@ static const struct object_ops job_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + job_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -234,6 +234,7 @@ static const struct object_ops job_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ job_get_fast_sync, /* get_fast_sync */ + job_close_handle, /* close_handle */ + job_destroy /* destroy */ + }; +@@ -258,6 +259,7 @@ static struct job *create_job_object( struct object *root, const struct unicode_ + job->completion_port = NULL; + job->completion_key = 0; + job->parent = NULL; ++ job->fast_sync = NULL; + } + } + return job; +@@ -414,6 +416,17 @@ static void terminate_job( struct job *job, int exit_code ) + job->terminating = 0; + job->signaled = 1; + wake_up( &job->obj, 0 ); ++ fast_set_event( job->fast_sync ); ++} ++ ++static struct fast_sync *job_get_fast_sync( struct object *obj ) ++{ ++ struct job *job = (struct job *)obj; ++ ++ if (!job->fast_sync) ++ job->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, job->signaled ); ++ if (job->fast_sync) grab_object( job->fast_sync ); ++ return job->fast_sync; + } + + static int job_close_handle( struct object *obj, struct process *process, obj_handle_t handle ) +@@ -444,6 +457,8 @@ static void job_destroy( struct object *obj ) + list_remove( &job->parent_job_entry ); + release_object( job->parent ); + } ++ ++ if (job->fast_sync) release_object( 
job->fast_sync ); + } + + static void job_dump( struct object *obj, int verbose ) +@@ -688,9 +703,9 @@ struct process *create_process( int fd, struct process *parent, unsigned int fla + process->rawinput_device_count = 0; + process->rawinput_mouse = NULL; + process->rawinput_kbd = NULL; ++ process->fast_sync = NULL; + memset( &process->image_info, 0, sizeof(process->image_info) ); + list_init( &process->rawinput_entry ); +- process->esync_fd = -1; + list_init( &process->kernel_object ); + list_init( &process->thread_list ); + list_init( &process->locks ); +@@ -747,9 +762,6 @@ struct process *create_process( int fd, struct process *parent, unsigned int fla + if (!token_assign_label( process->token, &high_label_sid )) + goto error; + +- if (do_esync()) +- process->esync_fd = esync_create_fd( 0, 0 ); +- + set_fd_events( process->msg_fd, POLLIN ); /* start listening to events */ + return process; + +@@ -798,7 +810,8 @@ static void process_destroy( struct object *obj ) + free( process->rawinput_devices ); + free( process->dir_cache ); + free( process->image ); +- if (do_esync()) close( process->esync_fd ); ++ ++ if (process->fast_sync) release_object( process->fast_sync ); + } + + /* dump a process on stdout for debugging purposes */ +@@ -816,13 +829,6 @@ static int process_signaled( struct object *obj, struct wait_queue_entry *entry + return !process->running_threads; + } + +-static int process_get_esync_fd( struct object *obj, enum esync_type *type ) +-{ +- struct process *process = (struct process *)obj; +- *type = ESYNC_MANUAL_SERVER; +- return process->esync_fd; +-} +- + static unsigned int process_map_access( struct object *obj, unsigned int access ) + { + access = default_map_access( obj, access ); +@@ -837,6 +843,16 @@ static struct list *process_get_kernel_obj_list( struct object *obj ) + return &process->kernel_object; + } + ++static struct fast_sync *process_get_fast_sync( struct object *obj ) ++{ ++ struct process *process = (struct process *)obj; ++ ++ if (!process->fast_sync) ++ process->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, !process->running_threads ); ++ if (process->fast_sync) grab_object( process->fast_sync ); ++ return process->fast_sync; ++} ++ + static struct security_descriptor *process_get_sd( struct object *obj ) + { + static struct security_descriptor *process_default_sd; +@@ -1001,6 +1017,7 @@ static void process_killed( struct process *process ) + release_job_process( process ); + start_sigkill_timer( process ); + wake_up( &process->obj, 0 ); ++ fast_set_event( process->fast_sync ); + } + + /* add a thread to a process running threads list */ +diff --git a/server/process.h b/server/process.h +index bedd8bb..2140427 100644 +--- a/server/process.h ++++ b/server/process.h +@@ -86,7 +86,7 @@ struct process + struct list rawinput_entry; /* entry in the rawinput process list */ + struct list kernel_object; /* list of kernel object pointers */ + pe_image_info_t image_info; /* main exe image info */ +- int esync_fd; /* esync file descriptor (signaled on exit) */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + /* process functions */ +diff --git a/server/protocol.def b/server/protocol.def +index 4983691..7bc4208 100644 +--- a/server/protocol.def ++++ b/server/protocol.def +@@ -3887,6 +3887,7 @@ struct handle_info + obj_handle_t handle; /* process handle */ + @END + ++ + /* Iterate thread list for process */ + @REQ(get_next_thread) + obj_handle_t process; /* process handle */ +@@ -3898,63 +3899,60 @@ struct handle_info + obj_handle_t 
handle; /* next thread handle */ + @END + +-enum esync_type ++ ++enum fast_sync_type + { +- ESYNC_SEMAPHORE = 1, +- ESYNC_AUTO_EVENT, +- ESYNC_MANUAL_EVENT, +- ESYNC_MUTEX, +- ESYNC_AUTO_SERVER, +- ESYNC_MANUAL_SERVER, +- ESYNC_QUEUE, ++ FAST_SYNC_SEMAPHORE = 1, ++ FAST_SYNC_MUTEX, ++ FAST_SYNC_AUTO_EVENT, ++ FAST_SYNC_MANUAL_EVENT, ++ FAST_SYNC_AUTO_SERVER, ++ FAST_SYNC_MANUAL_SERVER, ++ FAST_SYNC_QUEUE, + }; + +-/* Create a new eventfd-based synchronization object */ +-@REQ(create_esync) +- unsigned int access; /* wanted access rights */ +- int initval; /* initial value */ +- int type; /* type of esync object */ +- int max; /* maximum count on a semaphore */ +- VARARG(objattr,object_attributes); /* object attributes */ ++ ++/* Obtain a handle to the fast synchronization device object */ ++@REQ(get_linux_sync_device) + @REPLY +- obj_handle_t handle; /* handle to the object */ +- int type; /* actual type (may be different for events) */ +- unsigned int shm_idx; ++ obj_handle_t handle; /* handle to the device */ + @END + +-@REQ(open_esync) +- unsigned int access; /* wanted access rights */ +- unsigned int attributes; /* object attributes */ +- obj_handle_t rootdir; /* root directory */ +- int type; /* type of esync object (above) */ +- VARARG(name,unicode_str); /* object name */ ++ ++/* Get the fast synchronization object associated with the given handle */ ++@REQ(get_linux_sync_obj) ++ obj_handle_t handle; /* handle to the object */ + @REPLY +- obj_handle_t handle; /* handle to the event */ +- int type; /* type of esync object (above) */ +- unsigned int shm_idx; /* this object's index into the shm section */ ++ obj_handle_t handle; /* handle to the fast synchronization object */ ++ int type; /* object type */ ++ unsigned int access; /* handle access rights */ + @END + +-/* Retrieve the esync fd for an object. */ +-@REQ(get_esync_fd) +- obj_handle_t handle; /* handle to the object */ +-@REPLY +- int type; +- unsigned int shm_idx; ++ ++/* Begin a client-side wait on a message queue */ ++@REQ(fast_select_queue) ++ obj_handle_t handle; /* handle to the queue */ + @END + +-@REQ(esync_msgwait) +- int in_msgwait; /* are we in a message wait? */ ++ ++/* End a client-side wait on a message queue */ ++@REQ(fast_unselect_queue) ++ obj_handle_t handle; /* handle to the queue */ ++ int signaled; /* was the queue signaled? */ + @END + + /* Setup keyboard auto-repeat */ + @REQ(set_keyboard_repeat) + int enable; /* whether to enable auto-repeat */ + int delay; /* auto-repeat delay in ms */ + int period; /* auto-repeat period in ms */ + @REPLY + int enable; /* previous state of auto-repeat enable */ + @END + +-/* Retrieve the fd to wait on for user APCs. 
*/
+-@REQ(get_esync_apc_fd)
++
++/* Get an event handle to be used for thread alerts with fast synchronization */
++@REQ(get_fast_alert_event)
++@REPLY
++ obj_handle_t handle; /* handle to the event */
+@END
+diff --git a/server/queue.c b/server/queue.c
+index fee3a8d..287a56e 100644
+--- a/server/queue.c
++++ b/server/queue.c
+@@ -43,7 +43,6 @@
+ #include "process.h"
+ #include "request.h"
+ #include "user.h"
+-#include "esync.h"
+
+ #define WM_NCMOUSEFIRST WM_NCMOUSEMOVE
+ #define WM_NCMOUSELAST (WM_NCMOUSEFIRST+(WM_MOUSELAST-WM_MOUSEFIRST))
+@@ -147,8 +146,8 @@ struct msg_queue
+ timeout_t last_get_msg; /* time of last get message call */
+ int keystate_lock; /* owns an input keystate lock */
+ unsigned int ignore_post_msg; /* ignore post messages newer than this unique id */
+- int esync_fd; /* esync file descriptor (signalled on message) */
+- int esync_in_msgwait; /* our thread is currently waiting on us */
++ struct fast_sync *fast_sync; /* fast synchronization object */
++ int in_fast_wait; /* are we in a client-side wait? */
+ };
+
+ struct hotkey
+@@ -165,8 +164,8 @@ static void msg_queue_dump( struct object *obj, int verbose );
+ static int msg_queue_add_queue( struct object *obj, struct wait_queue_entry *entry );
+ static void msg_queue_remove_queue( struct object *obj, struct wait_queue_entry *entry );
+ static int msg_queue_signaled( struct object *obj, struct wait_queue_entry *entry );
+-static int msg_queue_get_esync_fd( struct object *obj, enum esync_type *type );
+ static void msg_queue_satisfied( struct object *obj, struct wait_queue_entry *entry );
++static struct fast_sync *msg_queue_get_fast_sync( struct object *obj );
+ static void msg_queue_destroy( struct object *obj );
+ static void msg_queue_poll_event( struct fd *fd, int event );
+ static void thread_input_dump( struct object *obj, int verbose );
+@@ -181,7 +180,6 @@ static const struct object_ops msg_queue_ops =
+ msg_queue_add_queue, /* add_queue */
+ msg_queue_remove_queue, /* remove_queue */
+ msg_queue_signaled, /* signaled */
+- msg_queue_get_esync_fd, /* get_esync_fd */
+ msg_queue_satisfied, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -194,6 +192,7 @@ static const struct object_ops msg_queue_ops =
+ NULL, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ msg_queue_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ msg_queue_destroy /* destroy */
+ };
+@@ -219,7 +218,6 @@ static const struct object_ops thread_input_ops =
+ no_add_queue, /* add_queue */
+ NULL, /* remove_queue */
+ NULL, /* signaled */
+- NULL, /* get_esync_fd */
+ NULL, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -232,6 +230,7 @@ static const struct object_ops thread_input_ops =
+ NULL, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ no_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ thread_input_destroy /* destroy */
+ };
+@@ -321,32 +320,29 @@ static struct msg_queue *create_msg_queue( struct thread *thread, struct thread_
+ queue->last_get_msg = current_time;
+ queue->keystate_lock = 0;
+ queue->ignore_post_msg = 0;
+- queue->esync_fd = -1;
+- queue->esync_in_msgwait = 0;
++ queue->fast_sync = NULL;
++ queue->in_fast_wait = 0;
+ list_init( &queue->send_result );
+ list_init( &queue->callback_result );
+ list_init( &queue->pending_timers );
+ list_init( &queue->expired_timers );
+ for (i = 0; i < NB_MSG_KINDS; i++) list_init( &queue->msg_list[i] );
+
+ if (!(queue->shared = alloc_shared_object()))
+ {
+ release_object( queue );
+ return NULL;
+ }
+
+ SHARED_WRITE_BEGIN( queue->shared, queue_shm_t )
+ {
+ memset( (void *)shared->hooks_count, 0, sizeof(shared->hooks_count) );
+ shared->wake_mask = 0;
+ shared->wake_bits = 0;
+ shared->changed_mask = 0;
+ shared->changed_bits = 0;
+ }
+ SHARED_WRITE_END;
+
+- if (do_esync())
+- queue->esync_fd = esync_create_fd( 0, 0 );
+-
+ thread->queue = queue;
+
+@@ -607,7 +603,11 @@ static inline void set_queue_bits( struct msg_queue *queue, unsigned int bits )
+ }
+ SHARED_WRITE_END;
+
+- if (is_signaled( queue )) wake_up( &queue->obj, 0 );
++ if (is_signaled( queue ))
++ {
++ wake_up( &queue->obj, 0 );
++ fast_set_event( queue->fast_sync );
++ }
+ }
+
+ /* clear some queue bits */
+@@ -620,9 +620,8 @@ static inline void clear_queue_bits( struct msg_queue *queue, unsigned int bits
+ if (queue->keystate_lock) unlock_input_keystate( queue->input );
+ queue->keystate_lock = 0;
+ }
+-
+- if (do_esync() && !is_signaled( queue ))
+- esync_clear( queue->esync_fd );
++ if (!is_signaled( queue ))
++ fast_reset_event( queue->fast_sync );
+ }
+
+ /* check if message is matched by the filter */
+@@ -1112,8 +1111,8 @@ static int is_queue_hung( struct msg_queue *queue )
+ return 0; /* thread is waiting on queue -> not hung */
+ }
+
+- if (do_esync() && queue->esync_in_msgwait)
+- return 0; /* thread is waiting on queue in absentia -> not hung */
++ if (queue->in_fast_wait)
++ return 0; /* thread is waiting on queue in absentia -> not hung */
+
+ return 1;
+ }
+@@ -1168,24 +1167,30 @@ static int msg_queue_signaled( struct object *obj, struct wait_queue_entry *entr
+ return ret || is_signaled( queue );
+ }
+
+-static int msg_queue_get_esync_fd( struct object *obj, enum esync_type *type )
++static void msg_queue_satisfied( struct object *obj, struct wait_queue_entry *entry )
+ {
+ struct msg_queue *queue = (struct msg_queue *)obj;
+- *type = ESYNC_QUEUE;
+- return queue->esync_fd;
++ const queue_shm_t *queue_shm = queue->shared;
++
++ SHARED_WRITE_BEGIN( queue_shm, queue_shm_t )
++ {
++ shared->wake_mask = 0;
++ shared->changed_mask = 0;
++ }
++ SHARED_WRITE_END;
++
++ fast_reset_event( queue->fast_sync );
+ }
+
+-static void msg_queue_satisfied( struct object *obj, struct wait_queue_entry *entry )
++static struct fast_sync *msg_queue_get_fast_sync( struct object *obj )
+ {
+ struct msg_queue *queue = (struct msg_queue *)obj;
+ const queue_shm_t *queue_shm = queue->shared;
+
+- SHARED_WRITE_BEGIN( queue_shm, queue_shm_t )
+- {
+- shared->wake_mask = 0;
+- shared->changed_mask = 0;
+- }
+- SHARED_WRITE_END;
++ if (!queue->fast_sync)
++ queue->fast_sync = fast_create_event( FAST_SYNC_QUEUE, is_signaled( queue ) );
++ if (queue->fast_sync) grab_object( queue->fast_sync );
++ return queue->fast_sync;
+ }
+
+ static void msg_queue_destroy( struct object *obj )
+@@ -1221,7 +1224,7 @@ static void msg_queue_destroy( struct object *obj )
+ if (queue->hooks) release_object( queue->hooks );
+ if (queue->fd) release_object( queue->fd );
+ if (queue->shared) free_shared_object( queue->shared );
+- if (do_esync()) close( queue->esync_fd );
++ if (queue->fast_sync) release_object( queue->fast_sync );
+ }
+
+ static void msg_queue_poll_event( struct fd *fd, int event )
+@@ -1232,6 +1235,7 @@ static void msg_queue_poll_event( struct fd *fd, int event )
+ if (event & (POLLERR | POLLHUP)) set_fd_events( fd, -1 );
+ else set_fd_events( queue->fd, 0 );
+ wake_up( &queue->obj, 0 );
++ fast_set_event( queue->fast_sync );
+ }
+
+ static void thread_input_dump( struct object *obj, int verbose )
+@@ -2982,11 +2982,14 @@ DECL_HANDLER(set_queue_mask)
+ shared->changed_mask = 0;
+ }
+ SHARED_WRITE_END;
++
++ fast_reset_event( queue->fast_sync );
++ }
++ else
++ {
++ wake_up( &queue->obj, 0 );
++ fast_set_event( queue->fast_sync );
+ }
+- else wake_up( &queue->obj, 0 );
+ }
+-
+- if (do_esync() && !is_signaled( queue ))
+- esync_clear( queue->esync_fd );
+ }
+ }
+@@ -2867,9 +2880,9 @@ DECL_HANDLER(get_queue_status)
+ shared->changed_bits &= ~req->clear_bits;
+ }
+ SHARED_WRITE_END;
+
+- if (do_esync() && !is_signaled( queue ))
+- esync_clear( queue->esync_fd );
++ if (!is_signaled( queue ))
++ fast_reset_event( queue->fast_sync );
+ }
+ else reply->wake_bits = reply->changed_bits = 0;
+ }
+@@ -3049,6 +3061,9 @@ DECL_HANDLER(get_message)
+ if (filter & QS_INPUT) queue->changed_bits &= ~QS_INPUT;
+ if (filter & QS_PAINT) queue->changed_bits &= ~QS_PAINT;
+
++ if (!is_signaled( queue ))
++ fast_reset_event( queue->fast_sync );
++
+ /* then check for posted messages */
+ if ((filter & QS_POSTMESSAGE) &&
+ get_posted_message( queue, queue->ignore_post_msg, get_win, req->get_first, req->get_last, req->flags, reply ))
+@@ -3119,11 +3134,8 @@ DECL_HANDLER(get_message)
+ }
+ SHARED_WRITE_END;
+
++ fast_reset_event( queue->fast_sync );
+ set_error( STATUS_PENDING ); /* FIXME */
+-
+- if (do_esync() && !is_signaled( queue ))
+- esync_clear( queue->esync_fd );
+-
+ return;
+
+ found_msg:
+@@ -4020,21 +4020,60 @@ DECL_HANDLER(update_rawinput_devices)
+ }
+ }
+
+-DECL_HANDLER(esync_msgwait)
++DECL_HANDLER(fast_select_queue)
+ {
+- struct msg_queue *queue = get_current_queue();
++ struct msg_queue *queue;
+ const queue_shm_t *queue_shm;
+
+- if (!queue) return;
++ if (!(queue = (struct msg_queue *)get_handle_obj( current->process, req->handle,
++ SYNCHRONIZE, &msg_queue_ops )))
++ return;
+ queue_shm = queue->shared;
+- queue->esync_in_msgwait = req->in_msgwait;
++ /* a thread can only wait on its own queue */
++ if (current->queue != queue || queue->in_fast_wait)
++ {
++ set_error( STATUS_ACCESS_DENIED );
++ }
++ else
++ {
++ if (current->process->idle_event && !(queue_shm->wake_mask & QS_SMRESULT))
++ set_event( current->process->idle_event );
++
++ if (queue->fd)
++ set_fd_events( queue->fd, POLLIN );
+
+- if (current->process->idle_event && !(queue_shm->wake_mask & QS_SMRESULT))
+- set_event( current->process->idle_event );
++ queue->in_fast_wait = 1;
++ }
+
+- /* and start/stop waiting on the driver */
+- if (queue->fd)
+- set_fd_events( queue->fd, req->in_msgwait ? POLLIN : 0 );
++ release_object( queue );
++}
++
++DECL_HANDLER(fast_unselect_queue)
++{
++ struct msg_queue *queue;
++ const queue_shm_t *queue_shm;
++
++ if (!(queue = (struct msg_queue *)get_handle_obj( current->process, req->handle,
++ SYNCHRONIZE, &msg_queue_ops )))
++ return;
++
++ queue_shm = queue->shared;
++ if (current->queue != queue || !queue->in_fast_wait)
++ {
++ set_error( STATUS_ACCESS_DENIED );
++ }
++ else
++ {
++ if (queue->fd)
++ set_fd_events( queue->fd, 0 );
++
++ if (req->signaled)
++ msg_queue_satisfied( &queue->obj, NULL );
++
++ queue->in_fast_wait = 0;
++ }
++
++ release_object( queue );
+ }
+
+ DECL_HANDLER(set_keyboard_repeat)
+diff --git a/server/registry.c b/server/registry.c
+index 4454de3..dd5c556 100644
+--- a/server/registry.c
++++ b/server/registry.c
+@@ -180,7 +180,6 @@ static const struct object_ops key_ops =
+ no_add_queue, /* add_queue */
+ NULL, /* remove_queue */
+ NULL, /* signaled */
+- NULL, /* get_esync_fd */
+ NULL, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -193,6 +192,7 @@ static const struct object_ops key_ops =
+ key_unlink_name, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ no_get_fast_sync, /* get_fast_sync */
+ key_close_handle, /* close_handle */
+ key_destroy /* destroy */
+ };
+diff --git a/server/request.c b/server/request.c
+index ca83fdb..8c50f99 100644
+--- a/server/request.c
++++ b/server/request.c
+@@ -90,7 +90,6 @@ static const struct object_ops master_socket_ops =
+ no_add_queue, /* add_queue */
+ NULL, /* remove_queue */
+ NULL, /* signaled */
+- NULL, /* get_esync_fd */
+ NULL, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -103,6 +102,7 @@ static const struct object_ops master_socket_ops =
+ NULL, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ no_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ master_socket_destroy /* destroy */
+ };
+diff --git a/server/request.h b/server/request.h
+index eb6b8f4..1636e98 100644
+--- a/server/request.h
++++ b/server/request.h
+@@ -406,12 +406,12 @@ DECL_HANDLER(terminate_job);
+ DECL_HANDLER(suspend_process);
+ DECL_HANDLER(resume_process);
+ DECL_HANDLER(get_next_thread);
+-DECL_HANDLER(create_esync);
+-DECL_HANDLER(open_esync);
+-DECL_HANDLER(get_esync_fd);
+-DECL_HANDLER(esync_msgwait);
++DECL_HANDLER(get_linux_sync_device);
++DECL_HANDLER(get_linux_sync_obj);
++DECL_HANDLER(fast_select_queue);
++DECL_HANDLER(fast_unselect_queue);
+ DECL_HANDLER(set_keyboard_repeat);
+-DECL_HANDLER(get_esync_apc_fd);
++DECL_HANDLER(get_fast_alert_event);
+
+ #ifdef WANT_REQUEST_HANDLERS
+
+@@ -704,12 +704,12 @@ static const req_handler req_handlers[REQ_NB_REQUESTS] =
+ (req_handler)req_suspend_process,
+ (req_handler)req_resume_process,
+ (req_handler)req_get_next_thread,
+- (req_handler)req_create_esync,
+- (req_handler)req_open_esync,
+- (req_handler)req_get_esync_fd,
+- (req_handler)req_esync_msgwait,
++ (req_handler)req_get_linux_sync_device,
++ (req_handler)req_get_linux_sync_obj,
++ (req_handler)req_fast_select_queue,
++ (req_handler)req_fast_unselect_queue,
+ (req_handler)req_set_keyboard_repeat,
+- (req_handler)req_get_esync_apc_fd,
++ (req_handler)req_get_fast_alert_event,
+ };
+
+ C_ASSERT( sizeof(abstime_t) == 8 );
+@@ -2363,37 +2363,28 @@ C_ASSERT( FIELD_OFFSET(struct get_next_thread_request, flags) == 28 );
+ C_ASSERT( sizeof(struct get_next_thread_request) == 32 );
+ C_ASSERT( FIELD_OFFSET(struct get_next_thread_reply, handle) == 8 );
+ C_ASSERT( sizeof(struct get_next_thread_reply) == 16 );
+-C_ASSERT( FIELD_OFFSET(struct create_esync_request, access) == 12 );
+-C_ASSERT( FIELD_OFFSET(struct create_esync_request, initval) == 16 );
+-C_ASSERT( FIELD_OFFSET(struct create_esync_request, type) == 20 );
+-C_ASSERT( FIELD_OFFSET(struct create_esync_request, max) == 24 );
+-C_ASSERT( sizeof(struct create_esync_request) == 32 );
+-C_ASSERT( FIELD_OFFSET(struct create_esync_reply, handle) == 8 );
+-C_ASSERT( FIELD_OFFSET(struct create_esync_reply, type) == 12 );
+-C_ASSERT( FIELD_OFFSET(struct create_esync_reply, shm_idx) == 16 );
+-C_ASSERT( sizeof(struct create_esync_reply) == 24 );
+-C_ASSERT( FIELD_OFFSET(struct open_esync_request, access) == 12 );
+-C_ASSERT( FIELD_OFFSET(struct open_esync_request, attributes) == 16 );
+-C_ASSERT( FIELD_OFFSET(struct open_esync_request, rootdir) == 20 );
+-C_ASSERT( FIELD_OFFSET(struct open_esync_request, type) == 24 );
+-C_ASSERT( sizeof(struct open_esync_request) == 32 );
+-C_ASSERT( FIELD_OFFSET(struct open_esync_reply, handle) == 8 );
+-C_ASSERT( FIELD_OFFSET(struct open_esync_reply, type) == 12 );
+-C_ASSERT( FIELD_OFFSET(struct open_esync_reply, shm_idx) == 16 );
+-C_ASSERT( sizeof(struct open_esync_reply) == 24 );
+-C_ASSERT( FIELD_OFFSET(struct get_esync_fd_request, handle) == 12 );
+-C_ASSERT( sizeof(struct get_esync_fd_request) == 16 );
+-C_ASSERT( FIELD_OFFSET(struct get_esync_fd_reply, type) == 8 );
+-C_ASSERT( FIELD_OFFSET(struct get_esync_fd_reply, shm_idx) == 12 );
+-C_ASSERT( sizeof(struct get_esync_fd_reply) == 16 );
+-C_ASSERT( FIELD_OFFSET(struct esync_msgwait_request, in_msgwait) == 12 );
+-C_ASSERT( sizeof(struct esync_msgwait_request) == 16 );
++C_ASSERT( sizeof(struct get_linux_sync_device_request) == 16 );
++C_ASSERT( FIELD_OFFSET(struct get_linux_sync_device_reply, handle) == 8 );
++C_ASSERT( sizeof(struct get_linux_sync_device_reply) == 16 );
++C_ASSERT( FIELD_OFFSET(struct get_linux_sync_obj_request, handle) == 12 );
++C_ASSERT( sizeof(struct get_linux_sync_obj_request) == 16 );
++C_ASSERT( FIELD_OFFSET(struct get_linux_sync_obj_reply, handle) == 8 );
++C_ASSERT( FIELD_OFFSET(struct get_linux_sync_obj_reply, type) == 12 );
++C_ASSERT( FIELD_OFFSET(struct get_linux_sync_obj_reply, access) == 16 );
++C_ASSERT( sizeof(struct get_linux_sync_obj_reply) == 24 );
++C_ASSERT( FIELD_OFFSET(struct fast_select_queue_request, handle) == 12 );
++C_ASSERT( sizeof(struct fast_select_queue_request) == 16 );
++C_ASSERT( FIELD_OFFSET(struct fast_unselect_queue_request, handle) == 12 );
++C_ASSERT( FIELD_OFFSET(struct fast_unselect_queue_request, signaled) == 16 );
++C_ASSERT( sizeof(struct fast_unselect_queue_request) == 24 );
++C_ASSERT( sizeof(struct get_fast_alert_event_request) == 16 );
++C_ASSERT( FIELD_OFFSET(struct get_fast_alert_event_reply, handle) == 8 );
+ C_ASSERT( FIELD_OFFSET(struct set_keyboard_repeat_request, enable) == 12 );
+ C_ASSERT( FIELD_OFFSET(struct set_keyboard_repeat_request, delay) == 16 );
+ C_ASSERT( FIELD_OFFSET(struct set_keyboard_repeat_request, period) == 20 );
+ C_ASSERT( sizeof(struct set_keyboard_repeat_request) == 24 );
+ C_ASSERT( FIELD_OFFSET(struct set_keyboard_repeat_reply, enable) == 8 );
+ C_ASSERT( sizeof(struct set_keyboard_repeat_reply) == 16 );
+-C_ASSERT( sizeof(struct get_esync_apc_fd_request) == 16 );
++C_ASSERT( sizeof(struct get_fast_alert_event_reply) == 16 );
+
+ #endif /* WANT_REQUEST_HANDLERS */
+
+diff --git a/server/semaphore.c b/server/semaphore.c
+index e3889f2..9940919 100644
+--- a/server/semaphore.c
++++ b/server/semaphore.c
+@@ -55,12 +55,15 @@ struct semaphore
+ struct object obj; /* object header */
+ unsigned int count; /* current count */
+ unsigned int max; /* maximum possible count */
++ struct fast_sync *fast_sync; /* fast synchronization object */
+ };
+
+ static void semaphore_dump( struct object *obj, int verbose );
+ static int semaphore_signaled( struct object *obj, struct wait_queue_entry *entry );
+ static void semaphore_satisfied( struct object *obj, struct wait_queue_entry *entry );
+ static int semaphore_signal( struct object *obj, unsigned int access );
++static struct fast_sync *semaphore_get_fast_sync( struct object *obj );
++static void semaphore_destroy( struct object *obj );
+
+ static const struct object_ops semaphore_ops =
+ {
+@@ -70,7 +73,6 @@ static const struct object_ops semaphore_ops =
+ add_queue, /* add_queue */
+ remove_queue, /* remove_queue */
+ semaphore_signaled, /* signaled */
+- NULL, /* get_esync_fd */
+ semaphore_satisfied, /* satisfied */
+ semaphore_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -83,8 +85,9 @@ static const struct object_ops semaphore_ops =
+ default_unlink_name, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ semaphore_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+- no_destroy /* destroy */
++ semaphore_destroy /* destroy */
+ };
+
+
+@@ -106,6 +109,7 @@ static struct semaphore *create_semaphore( struct object *root, const struct uni
+ /* initialize it if it didn't already exist */
+ sem->count = initial;
+ sem->max = max;
++ sem->fast_sync = NULL;
+ }
+ }
+ return sem;
+@@ -168,6 +172,23 @@ static int semaphore_signal( struct object *obj, unsigned int access )
+ return release_semaphore( sem, 1, NULL );
+ }
+
++static struct fast_sync *semaphore_get_fast_sync( struct object *obj )
++{
++ struct semaphore *semaphore = (struct semaphore *)obj;
++
++ if (!semaphore->fast_sync)
++ semaphore->fast_sync = fast_create_semaphore( semaphore->count, semaphore->max );
++ if (semaphore->fast_sync) grab_object( semaphore->fast_sync );
++ return semaphore->fast_sync;
++}
++
++static void semaphore_destroy( struct object *obj )
++{
++ struct semaphore *semaphore = (struct semaphore *)obj;
++
++ if (semaphore->fast_sync) release_object( semaphore->fast_sync );
++}
++
+ /* create a semaphore */
+ DECL_HANDLER(create_semaphore)
+ {
+diff --git a/server/serial.c b/server/serial.c
+index 11e204e..5c210d1 100644
+--- a/server/serial.c
++++ b/server/serial.c
+@@ -85,7 +85,6 @@ static const struct object_ops serial_ops =
+ add_queue, /* add_queue */
+ remove_queue, /* remove_queue */
+ default_fd_signaled, /* signaled */
+- NULL, /* get_esync_fd */
+ no_satisfied, /* satisfied */
+ no_signal, /* signal */
+ serial_get_fd, /* get_fd */
+@@ -98,6 +97,7 @@ static const struct object_ops serial_ops =
+ NULL, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ default_fd_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ serial_destroy /* destroy */
+ };
+diff --git a/server/signal.c b/server/signal.c
+index 55cd6aa..e5def3d 100644
+--- a/server/signal.c
++++ b/server/signal.c
+@@ -62,7 +62,6 @@ static const struct object_ops handler_ops =
+ no_add_queue, /* add_queue */
+ NULL, /* remove_queue */
+ NULL, /* signaled */
+- NULL, /* get_esync_fd */
+ NULL, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -75,6 +74,7 @@ static const struct object_ops handler_ops =
+ NULL, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ no_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ handler_destroy /* destroy */
+ };
+diff --git a/server/sock.c b/server/sock.c
+index 104321b..3fb2e29 100644
+--- a/server/sock.c
++++ b/server/sock.c
+@@ -453,7 +453,6 @@ static const struct object_ops sock_ops =
+ add_queue, /* add_queue */
+ remove_queue, /* remove_queue */
+ default_fd_signaled, /* signaled */
+- NULL, /* get_esync_fd */
+ no_satisfied, /* satisfied */
+ no_signal, /* signal */
+ sock_get_fd, /* get_fd */
+@@ -466,6 +465,7 @@ static const struct object_ops sock_ops =
+ NULL, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ default_fd_get_fast_sync, /* get_fast_sync */
+ sock_close_handle, /* close_handle */
+ sock_destroy /* destroy */
+ };
+@@ -3555,7 +3555,6 @@ static const struct object_ops ifchange_ops =
+ no_add_queue, /* add_queue */
+ NULL, /* remove_queue */
+ NULL, /* signaled */
+- NULL, /* get_esync_fd */
+ no_satisfied, /* satisfied */
+ no_signal, /* signal */
+ ifchange_get_fd, /* get_fd */
+@@ -3568,6 +3567,7 @@ static const struct object_ops ifchange_ops =
+ NULL, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ no_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ ifchange_destroy /* destroy */
+ };
+@@ -3777,7 +3777,6 @@ static const struct object_ops socket_device_ops =
+ no_add_queue, /* add_queue */
+ NULL, /* remove_queue */
+ NULL, /* signaled */
+- NULL, /* get_esync_fd */
+ no_satisfied, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -3790,6 +3789,7 @@ static const struct object_ops socket_device_ops =
+ default_unlink_name, /* unlink_name */
+ socket_device_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ no_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ no_destroy /* destroy */
+ };
+diff --git a/server/symlink.c b/server/symlink.c
+index c7f3441..4a7cf68 100644
+--- a/server/symlink.c
++++ b/server/symlink.c
+@@ -71,7 +71,6 @@ static const struct object_ops symlink_ops =
+ no_add_queue, /* add_queue */
+ NULL, /* remove_queue */
+ NULL, /* signaled */
+- NULL, /* get_esync_fd */
+ NULL, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -84,6 +83,7 @@ static const struct object_ops symlink_ops =
+ default_unlink_name, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ no_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ symlink_destroy /* destroy */
+ };
+diff --git a/server/thread.c b/server/thread.c
+index 2ce94b4..8f603fc 100644
+--- a/server/thread.c
++++ b/server/thread.c
+@@ -50,7 +50,6 @@
+ #include "request.h"
+ #include "user.h"
+ #include "security.h"
+-#include "esync.h"
+
+
+ /* thread queues */
+@@ -97,7 +96,6 @@ static const struct object_ops thread_apc_ops =
+ add_queue, /* add_queue */
+ remove_queue, /* remove_queue */
+ thread_apc_signaled, /* signaled */
+- NULL, /* get_esync_fd */
+ no_satisfied, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -110,6 +108,7 @@ static const struct object_ops thread_apc_ops =
+ NULL, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ no_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ thread_apc_destroy /* destroy */
+ };
+@@ -140,7 +139,6 @@ static const struct object_ops context_ops =
+ add_queue, /* add_queue */
+ remove_queue, /* remove_queue */
+ context_signaled, /* signaled */
+- NULL, /* get_esync_fd */
+ no_satisfied, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -153,6 +151,7 @@ static const struct object_ops context_ops =
+ NULL, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ no_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ no_destroy /* destroy */
+ };
+@@ -177,10 +176,10 @@ struct type_descr thread_type =
+
+ static void dump_thread( struct object *obj, int verbose );
+ static int thread_signaled( struct object *obj, struct wait_queue_entry *entry );
+-static int thread_get_esync_fd( struct object *obj, enum esync_type *type );
+ static unsigned int thread_map_access( struct object *obj, unsigned int access );
+ static void thread_poll_event( struct fd *fd, int event );
+ static struct list *thread_get_kernel_obj_list( struct object *obj );
++static struct fast_sync *thread_get_fast_sync( struct object *obj );
+ static void destroy_thread( struct object *obj );
+
+ static const struct object_ops thread_ops =
+@@ -191,7 +190,6 @@ static const struct object_ops thread_ops =
+ add_queue, /* add_queue */
+ remove_queue, /* remove_queue */
+ thread_signaled, /* signaled */
+- thread_get_esync_fd, /* get_esync_fd */
+ no_satisfied, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -204,6 +202,7 @@ static const struct object_ops thread_ops =
+ NULL, /* unlink_name */
+ no_open_file, /* open_file */
+ thread_get_kernel_obj_list, /* get_kernel_obj_list */
++ thread_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ destroy_thread /* destroy */
+ };
+@@ -231,8 +230,6 @@ static inline void init_thread_structure( struct thread *thread )
+ thread->context = NULL;
+ thread->teb = 0;
+ thread->entry_point = 0;
+- thread->esync_fd = -1;
+- thread->esync_apc_fd = -1;
+ thread->system_regs = 0;
+ thread->queue = NULL;
+ thread->wait = NULL;
+@@ -253,6 +250,7 @@ static inline void init_thread_structure( struct thread *thread )
+ thread->token = NULL;
+ thread->desc = NULL;
+ thread->desc_len = 0;
++ thread->fast_sync = NULL;
++ thread->fast_alert_event = NULL;
+- thread->exit_poll = NULL;
+
+ thread->creation_time = current_time;
+@@ -380,12 +379,6 @@ struct thread *create_thread( int fd, struct process *process, const struct secu
+ }
+ }
+
+- if (do_esync())
+- {
+- thread->esync_fd = esync_create_fd( 0, 0 );
+- thread->esync_apc_fd = esync_create_fd( 0, 0 );
+- }
+-
+ set_fd_events( thread->request_fd, POLLIN ); /* start listening to events */
+ add_process_thread( thread->process, thread );
+ return thread;
+@@ -410,6 +403,16 @@ static struct list *thread_get_kernel_obj_list( struct object *obj )
+ return &thread->kernel_object;
+ }
+
++static struct fast_sync *thread_get_fast_sync( struct object *obj )
++{
++ struct thread *thread = (struct thread *)obj;
++
++ if (!thread->fast_sync)
++ thread->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, thread->state == TERMINATED );
++ if (thread->fast_sync) grab_object( thread->fast_sync );
++ return thread->fast_sync;
++}
++
+ /* cleanup everything that is no longer needed by a dead thread */
+ /* used by destroy_thread and kill_thread */
+ static void cleanup_thread( struct thread *thread )
+@@ -465,9 +468,7 @@ static void destroy_thread( struct object *obj )
+- if (thread->exit_poll) remove_timeout_user( thread->exit_poll );
+ if (thread->id) free_ptid( thread->id );
+ if (thread->token) release_object( thread->token );
+-
+- if (do_esync())
+- close( thread->esync_fd );
++ if (thread->fast_sync) release_object( thread->fast_sync );
++ if (thread->fast_alert_event) release_object( thread->fast_alert_event );
+ }
+
+ /* dump a thread on stdout for debugging purposes */
+@@ -486,13 +488,6 @@ static int thread_signaled( struct object *obj, struct wait_queue_entry *entry )
+- return mythread->state == TERMINATED && !mythread->exit_poll;
++ return (mythread->state == TERMINATED);
+ }
+
+-static int thread_get_esync_fd( struct object *obj, enum esync_type *type )
+-{
+- struct thread *thread = (struct thread *)obj;
+- *type = ESYNC_MANUAL_SERVER;
+- return thread->esync_fd;
+-}
+-
+ static unsigned int thread_map_access( struct object *obj, unsigned int access )
+ {
+ access = default_map_access( obj, access );
+@@ -1087,9 +1082,6 @@ void wake_up( struct object *obj, int max )
+ struct list *ptr;
+ int ret;
+
+- if (do_esync())
+- esync_wake_up( obj );
+-
+ LIST_FOR_EACH( ptr, &obj->wait_queue )
+ {
+ struct wait_queue_entry *entry = LIST_ENTRY( ptr, struct wait_queue_entry, entry );
+@@ -1177,8 +1169,8 @@ static int queue_apc( struct process *process, struct thread *thread, struct thr
+ {
+ wake_thread( thread );
+
+- if (do_esync() && queue == &thread->user_apc)
+- esync_wake_fd( thread->esync_apc_fd );
++ if (apc->call.type == APC_USER && thread->fast_alert_event)
++ set_event( thread->fast_alert_event );
+ }
+
+ return 1;
+@@ -1211,6 +1203,8 @@ void thread_cancel_apc( struct thread *thread, struct object *owner, enum apc_ty
+ apc->executed = 1;
+ wake_up( &apc->obj, 0 );
+ release_object( apc );
++ if (list_empty( &thread->user_apc ) && thread->fast_alert_event)
++ reset_event( thread->fast_alert_event );
+ return;
+ }
+ }
+@@ -1225,11 +1219,10 @@ static struct thread_apc *thread_dequeue_apc( struct thread *thread, int system
+ {
+ apc = LIST_ENTRY( ptr, struct thread_apc, entry );
+ list_remove( ptr );
+- }
+-
+- if (do_esync() && list_empty( &thread->system_apc ) && list_empty( &thread->user_apc ))
+- esync_clear( thread->esync_apc_fd );
+
++ if (list_empty( &thread->user_apc ) && thread->fast_alert_event)
++ reset_event( thread->fast_alert_event );
++ }
+ return apc;
+ }
+
+@@ -1305,26 +1302,6 @@
+ return -1;
+ }
+
+-static void check_terminated( void *arg )
+-{
+- struct thread *thread = arg;
+- assert( thread->obj.ops == &thread_ops );
+- assert( thread->state == TERMINATED );
+-
+- /* don't wake up until the thread is really dead, to avoid race conditions */
+- if (thread->unix_tid != -1 && !kill( thread->unix_tid, 0 ))
+- {
+- thread->exit_poll = add_timeout_user( -TICKS_PER_SEC / 1000, check_terminated, thread );
+- return;
+- }
+-
+- /* grab reference since object can be destroyed while trying to wake up */
+- grab_object( &thread->obj );
+- thread->exit_poll = NULL;
+- wake_up( &thread->obj, 0 );
+- release_object( &thread->obj );
+-}
+-
+ /* kill a thread on the spot */
+ void kill_thread( struct thread *thread, int violent_death )
+ {
+@@ -1345,14 +1338,9 @@ void kill_thread( struct thread *thread, int violent_death )
+ }
+ kill_console_processes( thread, 0 );
+ abandon_mutexes( thread );
+- if (do_esync())
+- esync_abandon_mutexes( thread );
++ fast_set_event( thread->fast_sync );
++ wake_up( &thread->obj, 0 );
++ if (violent_death) send_thread_signal( thread, SIGQUIT );
+- if (violent_death)
+- {
+- send_thread_signal( thread, SIGQUIT );
+- check_terminated( thread );
+- }
+- else wake_up( &thread->obj, 0 );
+ cleanup_thread( thread );
+ remove_process_thread( thread->process, thread );
+ release_object( thread );
+@@ -2094,3 +2086,12 @@ DECL_HANDLER(get_next_thread)
+ set_error( STATUS_NO_MORE_ENTRIES );
+ release_object( process );
+ }
++
++DECL_HANDLER(get_fast_alert_event)
++{
++ if (!current->fast_alert_event)
++ current->fast_alert_event = create_event( NULL, NULL, 0, 1, !list_empty( &current->user_apc ), NULL );
++
++ if (current->fast_alert_event)
++ reply->handle = alloc_handle( current->process, current->fast_alert_event, SYNCHRONIZE, 0 );
++}
+diff --git a/server/thread.h b/server/thread.h
+index 10e9e28..cb4643a 100644
+--- a/server/thread.h
++++ b/server/thread.h
+@@ -55,8 +55,6 @@ struct thread
+ struct process *process;
+ thread_id_t id; /* thread id */
+ struct list mutex_list; /* list of currently owned mutexes */
+- int esync_fd; /* esync file descriptor (signalled on exit) */
+- int esync_apc_fd; /* esync apc fd (signalled when APCs are present) */
+ unsigned int system_regs; /* which system regs have been set */
+ struct msg_queue *queue; /* message queue */
+ struct thread_wait *wait; /* current wait condition if sleeping */
+@@ -94,6 +92,7 @@ struct thread
+ data_size_t desc_len; /* thread description length in bytes */
+ WCHAR *desc; /* thread description string */
+- struct timeout_user *exit_poll; /* poll if the thread/process has exited already */
++ struct fast_sync *fast_sync; /* fast synchronization object */
++ struct event *fast_alert_event; /* fast synchronization alert event */
+ };
+
+ extern struct thread *current;
+diff --git a/server/timer.c b/server/timer.c
+index 36645a2..854a8e1 100644
+--- a/server/timer.c
++++ b/server/timer.c
+@@ -35,7 +35,6 @@
+ #include "file.h"
+ #include "handle.h"
+ #include "request.h"
+-#include "esync.h"
+
+ static const WCHAR timer_name[] = {'T','i','m','e','r'};
+
+@@ -62,13 +61,13 @@ struct timer
+ struct thread *thread; /* thread that set the APC function */
+ client_ptr_t callback; /* callback APC function */
+ client_ptr_t arg; /* callback argument */
+- int esync_fd; /* esync file descriptor */
++ struct fast_sync *fast_sync; /* fast synchronization object */
+ };
+
+ static void timer_dump( struct object *obj, int verbose );
+ static int timer_signaled( struct object *obj, struct wait_queue_entry *entry );
+-static int timer_get_esync_fd( struct object *obj, enum esync_type *type );
+ static void timer_satisfied( struct object *obj, struct wait_queue_entry *entry );
++static struct fast_sync *timer_get_fast_sync( struct object *obj );
+ static void timer_destroy( struct object *obj );
+
+ static const struct object_ops timer_ops =
+@@ -79,7 +78,6 @@ static const struct object_ops timer_ops =
+ add_queue, /* add_queue */
+ remove_queue, /* remove_queue */
+ timer_signaled, /* signaled */
+- timer_get_esync_fd, /* get_esync_fd */
+ timer_satisfied, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -92,6 +90,7 @@ static const struct object_ops timer_ops =
+ default_unlink_name, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ timer_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ timer_destroy /* destroy */
+ };
+@@ -114,10 +113,7 @@ static struct timer *create_timer( struct object *root, const struct unicode_str
+ timer->period = 0;
+ timer->timeout = NULL;
+ timer->thread = NULL;
+- timer->esync_fd = -1;
+-
+- if (do_esync())
+- timer->esync_fd = esync_create_fd( 0, 0 );
++ timer->fast_sync = NULL;
+ }
+ }
+ return timer;
+@@ -159,6 +155,7 @@ static void timer_callback( void *private )
+ /* wake up waiters */
+ timer->signaled = 1;
+ wake_up( &timer->obj, 0 );
++ fast_set_event( timer->fast_sync );
+ }
+
+ /* cancel a running timer */
+@@ -189,9 +186,7 @@ static int set_timer( struct timer *timer, timeout_t expire, unsigned int period
+ {
+ period = 0; /* period doesn't make any sense for a manual timer */
+ timer->signaled = 0;
+-
+- if (do_esync())
+- esync_clear( timer->esync_fd );
++ fast_reset_event( timer->fast_sync );
+ }
+ timer->when = (expire <= 0) ? expire - monotonic_time : max( expire, current_time );
+ timer->period = period;
+@@ -219,18 +214,24 @@ static int timer_signaled( struct object *obj, struct wait_queue_entry *entry )
+ return timer->signaled;
+ }
+
+-static int timer_get_esync_fd( struct object *obj, enum esync_type *type )
++static void timer_satisfied( struct object *obj, struct wait_queue_entry *entry )
+ {
+ struct timer *timer = (struct timer *)obj;
+- *type = timer->manual ? ESYNC_MANUAL_SERVER : ESYNC_AUTO_SERVER;
+- return timer->esync_fd;
++ assert( obj->ops == &timer_ops );
++ if (!timer->manual) timer->signaled = 0;
+ }
+
+-static void timer_satisfied( struct object *obj, struct wait_queue_entry *entry )
++static struct fast_sync *timer_get_fast_sync( struct object *obj )
+ {
+ struct timer *timer = (struct timer *)obj;
+- assert( obj->ops == &timer_ops );
+- if (!timer->manual) timer->signaled = 0;
++
++ if (!timer->fast_sync)
++ {
++ enum fast_sync_type type = timer->manual ? FAST_SYNC_MANUAL_SERVER : FAST_SYNC_AUTO_SERVER;
++ timer->fast_sync = fast_create_event( type, timer->signaled );
++ }
++ if (timer->fast_sync) grab_object( timer->fast_sync );
++ return timer->fast_sync;
+ }
+
+ static void timer_destroy( struct object *obj )
+@@ -240,7 +241,7 @@ static void timer_destroy( struct object *obj )
+
+ if (timer->timeout) remove_timeout_user( timer->timeout );
+ if (timer->thread) release_object( timer->thread );
+- if (do_esync()) close( timer->esync_fd );
++ if (timer->fast_sync) release_object( timer->fast_sync );
+ }
+
+ /* create a timer */
+diff --git a/server/token.c b/server/token.c
+index 8b4d2f0..42562fd 100644
+--- a/server/token.c
++++ b/server/token.c
+@@ -143,7 +143,6 @@ static const struct object_ops token_ops =
+ no_add_queue, /* add_queue */
+ NULL, /* remove_queue */
+ NULL, /* signaled */
+- NULL, /* get_esync_fd */
+ NULL, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -156,6 +155,7 @@ static const struct object_ops token_ops =
+ NULL, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ no_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ token_destroy /* destroy */
+ };
+diff --git a/server/trace.c b/server/trace.c
+index af96565..c027f4b 100644
+--- a/server/trace.c
++++ b/server/trace.c
+@@ -4619,67 +4619,56 @@ static void dump_get_next_thread_reply( const struct get_next_thread_reply *req
+ fprintf( stderr, " handle=%04x", req->handle );
+ }
+
+-static void dump_create_esync_request( const struct create_esync_request *req )
++static void dump_get_linux_sync_device_request( const struct get_linux_sync_device_request *req )
+ {
+- fprintf( stderr, " access=%08x", req->access );
+- fprintf( stderr, ", initval=%d", req->initval );
+- fprintf( stderr, ", type=%d", req->type );
+- fprintf( stderr, ", max=%d", req->max );
+- dump_varargs_object_attributes( ", objattr=", cur_size );
+ }
+
+-static void dump_create_esync_reply( const struct create_esync_reply *req )
++static void dump_get_linux_sync_device_reply( const struct get_linux_sync_device_reply *req )
+ {
+ fprintf( stderr, " handle=%04x", req->handle );
+- fprintf( stderr, ", type=%d", req->type );
+- fprintf( stderr, ", shm_idx=%08x", req->shm_idx );
+ }
+
+-static void dump_open_esync_request( const struct open_esync_request *req )
++static void dump_get_linux_sync_obj_request( const struct get_linux_sync_obj_request *req )
+ {
+- fprintf( stderr, " access=%08x", req->access );
+- fprintf( stderr, ", attributes=%08x", req->attributes );
+- fprintf( stderr, ", rootdir=%04x", req->rootdir );
+- fprintf( stderr, ", type=%d", req->type );
+- dump_varargs_unicode_str( ", name=", cur_size );
++ fprintf( stderr, " handle=%04x", req->handle );
+ }
+
+-static void dump_open_esync_reply( const struct open_esync_reply *req )
++static void dump_get_linux_sync_obj_reply( const struct get_linux_sync_obj_reply *req )
+ {
+ fprintf( stderr, " handle=%04x", req->handle );
+ fprintf( stderr, ", type=%d", req->type );
+- fprintf( stderr, ", shm_idx=%08x", req->shm_idx );
++ fprintf( stderr, ", access=%08x", req->access );
+ }
+
+-static void dump_get_esync_fd_request( const struct get_esync_fd_request *req )
++static void dump_fast_select_queue_request( const struct fast_select_queue_request *req )
+ {
+ fprintf( stderr, " handle=%04x", req->handle );
+ }
+
+-static void dump_get_esync_fd_reply( const struct get_esync_fd_reply *req )
++static void dump_fast_unselect_queue_request( const struct fast_unselect_queue_request *req )
+ {
+- fprintf( stderr, " type=%d", req->type );
+- fprintf( stderr, ", shm_idx=%08x", req->shm_idx );
++ fprintf( stderr, " handle=%04x", req->handle );
++ fprintf( stderr, ", signaled=%d", req->signaled );
+ }
+
+-static void dump_esync_msgwait_request( const struct esync_msgwait_request *req )
++static void dump_get_fast_alert_event_request( const struct get_fast_alert_event_request *req )
+ {
+- fprintf( stderr, " in_msgwait=%d", req->in_msgwait );
+ }
+
+ static void dump_set_keyboard_repeat_request( const struct set_keyboard_repeat_request *req )
+ {
+ fprintf( stderr, " enable=%d", req->enable );
+ fprintf( stderr, ", delay=%d", req->delay );
+ fprintf( stderr, ", period=%d", req->period );
+ }
+
+ static void dump_set_keyboard_repeat_reply( const struct set_keyboard_repeat_reply *req )
+ {
+ fprintf( stderr, " enable=%d", req->enable );
+ }
+
+-static void dump_get_esync_apc_fd_request( const struct get_esync_apc_fd_request *req )
++static void dump_get_fast_alert_event_reply( const struct get_fast_alert_event_reply *req )
+ {
++ fprintf( stderr, " handle=%04x", req->handle );
+ }
+
+ static const dump_func req_dumpers[REQ_NB_REQUESTS] = {
+@@ -4959,11 +4948,11 @@ static const dump_func req_dumpers[REQ_NB_REQUESTS] = {
+ (dump_func)dump_suspend_process_request,
+ (dump_func)dump_resume_process_request,
+ (dump_func)dump_get_next_thread_request,
+- (dump_func)dump_create_esync_request,
+- (dump_func)dump_open_esync_request,
+- (dump_func)dump_get_esync_fd_request,
+- (dump_func)dump_esync_msgwait_request,
+- (dump_func)dump_get_esync_apc_fd_request,
++ (dump_func)dump_get_linux_sync_device_request,
++ (dump_func)dump_get_linux_sync_obj_request,
++ (dump_func)dump_fast_select_queue_request,
++ (dump_func)dump_fast_unselect_queue_request,
++ (dump_func)dump_get_fast_alert_event_request,
+ };
+
+ static const dump_func reply_dumpers[REQ_NB_REQUESTS] = {
+@@ -5254,12 +5243,12 @@ static const dump_func reply_dumpers[REQ_NB_REQUESTS] = {
+ NULL,
+ NULL,
+ (dump_func)dump_get_next_thread_reply,
+- (dump_func)dump_create_esync_reply,
+- (dump_func)dump_open_esync_reply,
+- (dump_func)dump_get_esync_fd_reply,
++ (dump_func)dump_get_linux_sync_device_reply,
++ (dump_func)dump_get_linux_sync_obj_reply,
+ NULL,
+ (dump_func)dump_set_keyboard_repeat_reply,
+ NULL,
++ (dump_func)dump_get_fast_alert_event_reply,
+ };
+
+ static const char * const req_names[REQ_NB_REQUESTS] = {
+@@ -5549,11 +5538,12 @@ static const char * const req_names[REQ_NB_REQUESTS] = {
+ "suspend_process",
+ "resume_process",
+ "get_next_thread",
+- "create_esync",
+- "open_esync",
+- "get_esync_fd",
+- "esync_msgwait",
++ "get_linux_sync_device",
++ "get_linux_sync_obj",
++ "fast_select_queue",
++ "fast_unselect_queue",
+ "set_keyboard_repeat",
+- "get_esync_apc_fd",
++ "get_fast_alert_event",
+ };
+
+ static const struct
+diff --git a/server/window.c b/server/window.c
+index feac54e..f52e118 100644
+--- a/server/window.c
++++ b/server/window.c
+@@ -108,7 +108,6 @@ static const struct object_ops window_ops =
+ no_add_queue, /* add_queue */
+ NULL, /* remove_queue */
+ NULL, /* signaled */
+- NULL, /* get_esync_fd */
+ NULL, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -121,6 +120,7 @@ static const struct object_ops window_ops =
+ NULL, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ no_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ window_destroy /* destroy */
+ };
+diff --git a/server/winstation.c b/server/winstation.c
+index 4ef21d9..2719c9e 100644
+--- a/server/winstation.c
++++ b/server/winstation.c
+@@ -76,7 +76,6 @@ static const struct object_ops winstation_ops =
+ no_add_queue, /* add_queue */
+ NULL, /* remove_queue */
+ NULL, /* signaled */
+- NULL, /* get_esync_fd */
+ NULL, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -89,6 +88,7 @@ static const struct object_ops winstation_ops =
+ default_unlink_name, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ no_get_fast_sync, /* get_fast_sync */
+ winstation_close_handle, /* close_handle */
+ winstation_destroy /* destroy */
+ };
+@@ -117,7 +117,6 @@ static const struct object_ops desktop_ops =
+ no_add_queue, /* add_queue */
+ NULL, /* remove_queue */
+ NULL, /* signaled */
+- NULL, /* get_esync_fd */
+ NULL, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -130,6 +129,7 @@ static const struct object_ops desktop_ops =
+ default_unlink_name, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ no_get_fast_sync, /* get_fast_sync */
+ desktop_close_handle, /* close_handle */
+ desktop_destroy /* destroy */
+ };
diff --git a/wine-tkg-git/wine-tkg-patches/misc/fastsync/legacy/ntsync5-staging-protonify-1dfac2a.patch b/wine-tkg-git/wine-tkg-patches/misc/fastsync/legacy/ntsync5-staging-protonify-1dfac2a.patch
new file mode 100644
index 000000000..1ccca045d
--- /dev/null
+++ b/wine-tkg-git/wine-tkg-patches/misc/fastsync/legacy/ntsync5-staging-protonify-1dfac2a.patch
@@ -0,0 +1,8319 @@
+diff --git a/README.esync b/README.esync
+deleted file mode 100644
+index 11d8656..0000000
+--- a/README.esync
++++ /dev/null
+@@ -1,196 +0,0 @@
+-This is eventfd-based synchronization, or 'esync' for short. Turn it on with
+-WINEESYNC=1; debug it with +esync.
+-
+-== BUGS AND LIMITATIONS ==
+-
+-Please let me know if you find any bugs. If you can, also attach a log with
+-+seh,+pid,+esync,+server,+timestamp.
+-
+-If you get something like "eventfd: Too many open files" and then things start
+-crashing, you've probably run out of file descriptors. esync creates one
+-eventfd descriptor for each synchronization object, and some games may use a
+-large number of these. Linux by default limits a process to 4096 file
+-descriptors, which probably was reasonable back in the nineties but isn't
+-really anymore. (Fortunately Debian and derivatives [Ubuntu, Mint] already
+-have a reasonable limit.) To raise the limit you'll want to edit
+-/etc/security/limits.conf and add a line like
+-
+-* hard nofile 1048576
+-
+-then restart your session.
+-
+-On distributions using systemd, the settings in `/etc/security/limits.conf`
+-will be overridden by systemd's own settings. If you run `ulimit -Hn` and it
+-returns a lower number than the one you've previously set, then you can set
+-
+-DefaultLimitNOFILE=1048576
+-
+-in both `/etc/systemd/system.conf` and `/etc/systemd/user.conf`. You can then
+-execute `sudo systemctl daemon-reexec` and restart your session. Check again
+-with `ulimit -Hn` that the limit is correct.
+-
+-Also note that if the wineserver has esync active, all clients also must, and
+-vice versa. Otherwise things will probably crash quite badly.
+-
+-== EXPLANATION ==
+-
+-The aim is to execute all synchronization operations in "user-space", that is,
+-without going through wineserver. We do this using Linux's eventfd
+-facility. The main impetus to using eventfd is so that we can poll multiple
+-objects at once; in particular we can't do this with futexes, or pthread
+-semaphores, or the like. The only way I know of to wait on any of multiple
+-objects is to use select/poll/epoll to wait on multiple fds, and eventfd gives
+-us those fds in a quite usable way.
+-
+-Whenever a semaphore, event, or mutex is created, we have the server, instead
+-of creating a traditional server-side event/semaphore/mutex, instead create an
+-'esync' primitive. These live in esync.c and are very slim objects; in fact,
+-they don't even know what type of primitive they are. The server is involved
+-at all because we still need a way of creating named objects, passing handles
+-to another process, etc.
+-
+-The server creates an eventfd file descriptor with the requested parameters
+-and passes it back to ntdll. ntdll creates an object of the appropriate type,
+-then caches it in a table. This table is copied almost wholesale from the fd
+-cache code in server.c.
+-
+-Specific operations follow quite straightforwardly from eventfd:
+-
+-* To release an object, or set an event, we simply write() to it.
+-* An object is signalled if read() succeeds on it. Notably, we create all
+- eventfd descriptors with O_NONBLOCK, so that we can atomically check if an
+- object is signalled and grab it if it is. This also lets us reset events.
+-* For objects whose state should not be reset upon waiting—e.g. manual-reset
+- events—we simply check for the POLLIN flag instead of reading.
+-* Semaphores are handled by the EFD_SEMAPHORE flag. This matches up quite well
+- (although with some difficulties; see below).
+-* Mutexes store their owner thread locally. This isn't reliable information if
+- a different process's thread owns the mutex, but this doesn't matter—a
+- thread should only care whether it owns the mutex, so it knows whether to
+- try waiting on it or simply to increase the recursion count.
+-
+-The interesting part about esync is that (almost) all waits happen in ntdll,
+-including those on server-bound objects. The idea here is that on the server
+-side, for any waitable object, we create an eventfd file descriptor (not an
+-esync primitive), and then pass it to ntdll if the program tries to wait on
+-it. These are cached too, so only the first wait will require a round trip to
+-the server. Then the server signals the file descriptor as appropriate, and
+-thereby wakes up the client. So far this is implemented for processes,
+-threads, message queues (difficult; see below), and device managers (necessary
+-for drivers to work). All of these are necessarily server-bound, so we
+-wouldn't really gain anything by signalling on the client side instead. Of
+-course, except possibly for message queues, it's not likely that any program
+-(cutting-edge D3D game or not) is going to be causing a great wineserver load
+-by waiting on any of these objects; the motivation was rather to provide a way
+-to wait on ntdll-bound and server-bound objects at the same time.
+-
+-Some cases are still passed to the server, and there's probably no reason not
+-to keep them that way. Those that I noticed while testing include: async
+-objects, which are internal to the file APIs and never exposed to userspace,
+-startup_info objects, which are internal to the loader and signalled when a
+-process starts, and keyed events, which are exposed through an ntdll API
+-(although not through kernel32) but can't be mixed with other objects (you
+-have to use NtWaitForKeyedEvent()). Other cases include: named pipes, debug
+-events, sockets, and timers. It's unlikely we'll want to optimize debug events
+-or sockets (or any of the other, rather rare, objects), but it is possible
+-we'll want to optimize named pipes or timers.
+-
+-There were two sort of complications when working out the above. The first one
+-was events. The trouble is that (1) the server actually creates some events by
+-itself and (2) the server sometimes manipulates events passed by the
+-client. Resolving the first case was easy enough, and merely entailed creating
+-eventfd descriptors for the events the same way as for processes and threads
+-(note that we don't really lose anything this way; the events include
+-"LowMemoryCondition" and the event that signals system processes to shut
+-down). For the second case I basically had to hook the server-side event
+-functions to redirect to esync versions if the event was actually an esync
+-primitive.
+-
+-The second complication was message queues. The difficulty here is that X11
+-signals events by writing into a pipe (at least I think it's a pipe?), and so
+-as a result wineserver has to poll on that descriptor. In theory we could just
+-let wineserver do so and then signal us as appropriate, except that wineserver
+-only polls on the pipe when the thread is waiting for events (otherwise we'd
+-get e.g. keyboard input while the thread is doing something else, and spin
+-forever trying to wake up a thread that doesn't care). The obvious solution is
+-just to poll on that fd ourselves, and that's what I did—it's just that
+-getting the fd from wineserver was kind of ugly, and the code for waiting was
+-also kind of ugly basically because we have to wait on both X11's fd and the
+-"normal" process/thread-style wineserver fd that we use to signal sent
+-messages. The upshot about the whole thing was that races are basically
+-impossible, since a thread can only wait on its own queue.
+-
+-System APCs already work, since the server will forcibly suspend a thread if
+-it's not already waiting, and so we just need to check for EINTR from
+-poll(). User APCs and alertable waits are implemented in a similar style to
+-message queues (well, sort of): whenever someone executes an alertable wait,
+-we add an additional eventfd to the list, which the server signals when an APC
+-arrives. If that eventfd gets signaled, we hand it off to the server to take
+-care of, and return STATUS_USER_APC.
+-
+-Originally I kept the volatile state of semaphores and mutexes inside a
+-variable local to the handle, with the knowledge that this would break if
+-someone tried to open the handle elsewhere or duplicate it. It did, and so now
+-this state is stored inside shared memory. This is of the POSIX variety, is
+-allocated by the server (but never mapped there) and lives under the path
+-"/wine-esync".
+-
+-There are a couple things that this infrastructure can't handle, although
+-surprisingly there aren't that many. In particular:
+-* Implementing wait-all, i.e. WaitForMultipleObjects(..., TRUE, ...), is not
+- exactly possible the way we'd like it to be possible. In theory that
+- function should wait until it knows all objects are available, then grab
+- them all at once atomically. The server (like the kernel) can do this
+- because the server is single-threaded and can't race with itself. We can't
+- do this in ntdll, though. The approach I've taken I've laid out in great
+- detail in the relevant patch, but for a quick summary we poll on each object
+- until it's signaled (but don't grab it), check them all again, and if
+- they're all signaled we try to grab them all at once in a tight loop, and if
+- we fail on any of them we reset the count on whatever we shouldn't have
+- consumed. Such a blip would necessarily be very quick.
+-* The whole patchset only works on Linux, where eventfd is available. However,
+- it should be possible to make it work on a Mac, since eventfd is just a
+- quicker, easier way to use pipes (i.e. instead of writing 1 to the fd you'd
+- write 1 byte; instead of reading a 64-bit value from the fd you'd read as
+- many bytes as you can carry, which is admittedly less than 2**64 but
+- can probably be something reasonable.) It's also possible, although I
+- haven't yet looked, to use some different kind of synchronization
+- primitives, but pipes would be easiest to tack onto this framework.
+-* PulseEvent() can't work the way it's supposed to work. Fortunately it's rare
+- and deprecated. It's also explicitly mentioned on MSDN that a thread can
+- miss the notification for a kernel APC, so in a sense we're not necessarily
+- doing anything wrong.
+-
+-There are some things that are perfectly implementable but that I just haven't
+-done yet:
+-* Other synchronizable server primitives. It's unlikely we'll need any of
+- these, except perhaps named pipes (which would honestly be rather difficult)
+- and (maybe) timers.
+-* Access masks. We'd need to store these inside ntdll, and validate them when
+- someone tries to execute esync operations.
+-
+-This patchset was inspired by Daniel Santos' "hybrid synchronization"
+-patchset. My idea was to create a framework whereby even contended waits could
+-be executed in userspace, eliminating a lot of the complexity that his
+-synchronization primitives used. I do however owe some significant gratitude
+-toward him for setting me on the right path.
+-
+-I've tried to maximize code separation, both to make any potential rebases
+-easier and to ensure that esync is only active when configured. All code in
+-existing source files is guarded with "if (do_esync())", and generally that
+-condition is followed by "return esync_version_of_this_method(...);", where
+-the latter lives in esync.c and is declared in esync.h. I've also tried to
+-make the patchset very clear and readable—to write it as if I were going to
+-submit it upstream. (Some intermediate patches do break things, which Wine is
+-generally against, but I think it's for the better in this case.) I have cut
+-some corners, though; there is some error checking missing, or implicit
+-assumptions that the program is behaving correctly.
+-
+-I've tried to be careful about races. There are a lot of comments whose
+-purpose are basically to assure me that races are impossible. In most cases we
+-don't have to worry about races since all of the low-level synchronization is
+-done by the kernel.
+-
+-Anyway, yeah, this is esync. Use it if you like.
+-
+---Zebediah Figura
+diff --git a/configure b/configure
+index 4d13060..58291ba 100755
+--- a/configure
++++ b/configure
+@@ -8110,6 +8110,12 @@ if test "x$ac_cv_header_linux_major_h" = xyes
+ then :
+ printf "%s\n" "#define HAVE_LINUX_MAJOR_H 1" >>confdefs.h
+
++fi
++ac_fn_c_check_header_compile "$LINENO" "linux/ntsync.h" "ac_cv_header_linux_ntsync_h" "$ac_includes_default"
++if test "x$ac_cv_header_linux_ntsync_h" = xyes
++then :
++ printf "%s\n" "#define HAVE_LINUX_NTSYNC_H 1" >>confdefs.h
++
+ fi
+ ac_fn_c_check_header_compile "$LINENO" "linux/param.h" "ac_cv_header_linux_param_h" "$ac_includes_default"
+ if test "x$ac_cv_header_linux_param_h" = xyes
+diff --git a/configure.ac b/configure.ac
+index a3c414d..5af9132 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -403,6 +403,7 @@ AC_CHECK_HEADERS(\
+ linux/input.h \
+ linux/ioctl.h \
+ linux/major.h \
++ linux/ntsync.h \
+ linux/param.h \
+ linux/seccomp.h \
+ linux/serial.h \
+@@ -433,7 +434,6 @@ AC_CHECK_HEADERS(\
+ sys/cdio.h \
+ sys/epoll.h \
+ sys/event.h \
+- sys/eventfd.h \
+ sys/extattr.h \
+ sys/filio.h \
+ sys/ipc.h \
+@@ -2094,7 +2094,6 @@ AC_CHECK_FUNCS(\
+ port_create \
+ posix_fadvise \
+ posix_fallocate \
+- ppoll \
+ prctl \
+ proc_pidinfo \
+ sched_yield \
+@@ -2120,12 +2119,6 @@ case $host_os in
+ ;;
+ esac
+
+-ac_save_LIBS=$LIBS
+-AC_SEARCH_LIBS(shm_open, rt,
+- [AC_DEFINE(HAVE_SHM_OPEN, 1, [Define to 1 if you have the `shm_open' function.])
+- test "$ac_res" = "none required" || AC_SUBST(RT_LIBS,"$ac_res")])
+-LIBS=$ac_save_LIBS
+-
+ AC_CACHE_CHECK([for sched_setaffinity],wine_cv_have_sched_setaffinity,
+ AC_LINK_IFELSE([AC_LANG_PROGRAM(
+ [[#include <sched.h>]], [[sched_setaffinity(0, 0, 0);]])],[wine_cv_have_sched_setaffinity=yes],[wine_cv_have_sched_setaffinity=no]))
+diff --git a/dlls/kernel32/tests/sync.c b/dlls/kernel32/tests/sync.c
+index 424cbe2..50e81aa 100644
+--- a/dlls/kernel32/tests/sync.c
++++ b/dlls/kernel32/tests/sync.c
+@@ -57,7 +57,6 @@ static BOOLEAN (WINAPI *pTryAcquireSRWLockShared)(PSRWLOCK);
+
+ static NTSTATUS (WINAPI *pNtAllocateVirtualMemory)(HANDLE, PVOID *, ULONG_PTR, SIZE_T *, ULONG, ULONG);
+ static NTSTATUS (WINAPI *pNtFreeVirtualMemory)(HANDLE, PVOID *, SIZE_T *, ULONG);
+-static NTSTATUS (WINAPI *pNtQuerySystemTime)(LARGE_INTEGER *);
+ static NTSTATUS (WINAPI *pNtWaitForSingleObject)(HANDLE, BOOLEAN, const LARGE_INTEGER *);
+ static NTSTATUS (WINAPI *pNtWaitForMultipleObjects)(ULONG,const HANDLE*,BOOLEAN,BOOLEAN,const LARGE_INTEGER*);
+ static PSLIST_ENTRY (__fastcall *pRtlInterlockedPushListSList)(PSLIST_HEADER list, PSLIST_ENTRY first,
+@@ -228,23 +227,8 @@ static void test_temporary_objects(void)
+ ok(GetLastError() == ERROR_FILE_NOT_FOUND, "wrong error %lu\n", GetLastError());
+ }
+
+-static HANDLE mutex, mutex2, mutices[2];
+-
+-static DWORD WINAPI mutex_thread( void *param )
+-{
+- DWORD expect = (DWORD)(DWORD_PTR)param;
+- DWORD ret;
+-
+- ret = WaitForSingleObject( mutex, 0 );
+- ok(ret == expect, "expected %lu, got %lu\n", expect, ret);
+-
+- if (!ret) ReleaseMutex( mutex );
+- return 0;
+-}
+-
+ static void test_mutex(void)
+ {
+- HANDLE thread;
+ DWORD wait_ret;
+ BOOL ret;
+ HANDLE hCreated;
+@@ -284,8 +268,7 @@ static void test_mutex(void)
+ SetLastError(0xdeadbeef);
+ hOpened = OpenMutexA(GENERIC_READ | GENERIC_WRITE, FALSE, "WineTestMutex");
+ ok(hOpened != NULL, "OpenMutex failed with error %ld\n", GetLastError());
+- wait_ret = WaitForSingleObject(hOpened, 0);
+-todo_wine_if(getenv("WINEESYNC")) /* XFAIL: validation is not implemented */
++ wait_ret = WaitForSingleObject(hOpened, INFINITE);
+ ok(wait_ret == WAIT_FAILED, "WaitForSingleObject succeeded\n");
+ CloseHandle(hOpened);
+
+@@ -316,7 +299,6 @@ todo_wine_if(getenv("WINEESYNC")) /* XFAIL: validation is not implemented */
+
+ SetLastError(0xdeadbeef);
+ ret = ReleaseMutex(hCreated);
+-todo_wine_if(getenv("WINEESYNC")) /* XFAIL: due to the above */
+ ok(!ret && (GetLastError() == ERROR_NOT_OWNER),
+ "ReleaseMutex should have failed with ERROR_NOT_OWNER instead of %ld\n", GetLastError());
+
+@@ -355,85 +337,6 @@ todo_wine_if(getenv("WINEESYNC")) /* XFAIL: due to the above */
+ CloseHandle(hOpened);
+
+ CloseHandle(hCreated);
+-
+- mutex = CreateMutexA( NULL, FALSE, NULL );
+- ok(!!mutex, "got error %lu\n", GetLastError());
+-
+- ret = ReleaseMutex( mutex );
+- ok(!ret, "got %d\n", ret);
+- ok(GetLastError() == ERROR_NOT_OWNER, "got error %lu\n", GetLastError());
+-
+- for (i = 0; i < 100; i++)
+- {
+- ret = WaitForSingleObject( mutex, 0 );
+- ok(ret == 0, "got %u\n", ret);
+- }
+-
+- for (i = 0; i < 100; i++)
+- {
+- ret = ReleaseMutex( mutex );
+- ok(ret, "got error %lu\n", GetLastError());
+- }
+-
+- ret = ReleaseMutex( mutex );
+- ok(!ret, "got %d\n", ret);
+- ok(GetLastError() == ERROR_NOT_OWNER, "got error %lu\n", GetLastError());
+-
+- thread = CreateThread( NULL, 0, mutex_thread, (void *)0, 0, NULL );
+- ret = WaitForSingleObject( thread, 2000 );
+- ok(ret == 0, "wait failed: %u\n", ret);
+-
+- WaitForSingleObject( mutex, 0 );
+-
+- thread = CreateThread( NULL, 0, mutex_thread, (void *)WAIT_TIMEOUT, 0, NULL );
+- ret = WaitForSingleObject( thread, 2000 );
+- ok(ret == 0, "wait failed: %u\n", ret);
+-
+- ret = ReleaseMutex( mutex );
+- ok(ret, "got error %lu\n", GetLastError());
+-
+- thread = CreateThread( NULL, 0, mutex_thread, (void *)0, 0, NULL );
+- ret = WaitForSingleObject( thread, 2000 );
+- ok(ret == 0, "wait failed: %u\n", ret);
+-
+- mutex2 = CreateMutexA( NULL, TRUE, NULL );
+- ok(!!mutex2, "got error %lu\n", GetLastError());
+-
+- ret = ReleaseMutex( mutex2 );
+- ok(ret, "got error %lu\n", GetLastError());
+-
+- ret = ReleaseMutex( mutex2 );
+- ok(!ret, "got %d\n", ret);
+- ok(GetLastError() == ERROR_NOT_OWNER, "got error %lu\n", GetLastError());
+-
+- mutices[0] = mutex;
+- mutices[1] = mutex2;
+-
+- ret = WaitForMultipleObjects( 2, mutices, FALSE, 0 );
+- ok(ret == 0, "got %u\n", ret);
+-
+- ret = ReleaseMutex( mutex );
+- ok(ret, "got error %lu\n", GetLastError());
+-
+- ret = ReleaseMutex( mutex2 );
+- ok(!ret, "got %d\n", ret);
+- ok(GetLastError() == ERROR_NOT_OWNER, "got error %lu\n", GetLastError());
+-
+- ret = WaitForMultipleObjects( 2, mutices, TRUE, 0 );
+- ok(ret == 0, "got %u\n", ret);
+-
+- ret = ReleaseMutex( mutex );
+- ok(ret, "got error %lu\n", GetLastError());
+-
+- ret = ReleaseMutex( mutex2 );
+- ok(ret, "got error %lu\n", GetLastError());
+-
+- ret = CloseHandle( mutex );
+- ok(ret, "got error %lu\n", GetLastError());
+-
+- ret = CloseHandle( mutex2 );
+- ok(ret, "got error %lu\n", GetLastError());
+-
+ }
+
+ static void test_slist(void)
+@@ -609,13 +512,12 @@ static void test_slist(void)
+
+ static void test_event(void)
+ {
+- HANDLE handle, handle2, handles[2];
++ HANDLE handle, handle2;
+ SECURITY_ATTRIBUTES sa;
+ SECURITY_DESCRIPTOR sd;
+ ACL acl;
+ DWORD ret;
+ BOOL val;
+- int i;
+
+ /* no sd */
+ handle = CreateEventA(NULL, FALSE, FALSE, __FILE__ ": Test Event");
+@@ -719,130 +621,11 @@ static void test_event(void)
+ ok( ret, "QueryMemoryResourceNotification failed err %lu\n", GetLastError() );
+ ok( val == FALSE || val == TRUE, "wrong value %u\n", val );
+ CloseHandle( handle );
+-
+- handle = CreateEventA( NULL, TRUE, FALSE, NULL );
+- ok(!!handle, "got error %lu\n", GetLastError());
+-
+- ret = WaitForSingleObject( handle, 0 );
+- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret);
+-
+- ret = SetEvent( handle );
+- ok(ret, "got error %lu\n", GetLastError());
+-
+- ret = SetEvent( handle );
+- ok(ret, "got error %lu\n", GetLastError());
+-
+- for (i = 0; i < 100; i++)
+- {
+- ret = WaitForSingleObject( handle, 0 );
+- ok(ret == 0, "got %lu\n", ret);
+- }
+-
+- ret = ResetEvent( handle );
+- ok(ret, "got error %lu\n", GetLastError());
+-
+- ret = ResetEvent( handle );
+- ok(ret, "got error %lu\n", GetLastError());
+-
+- ret = WaitForSingleObject( handle, 0 );
+- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret);
+-
+- handle2 = CreateEventA( NULL, FALSE, TRUE, NULL );
+- ok(!!handle2, "got error %lu\n", GetLastError());
+-
+- ret = WaitForSingleObject( handle2, 0 );
+- ok(ret == 0, "got %lu\n", ret);
+-
+- ret = WaitForSingleObject( handle2, 0 );
+- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret);
+-
+- ret = SetEvent( handle2 );
+- ok(ret, "got error %lu\n", GetLastError());
+-
+- ret = SetEvent( handle2 );
+- ok(ret, "got error %lu\n", GetLastError());
+-
+- ret = ResetEvent( handle2 );
+- ok(ret, "got error %lu\n", GetLastError());
+-
+- ret = ResetEvent( handle2 );
+- ok(ret, "got error %lu\n", GetLastError());
+-
+- ret = WaitForSingleObject( handle2, 0 );
+- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret);
+-
+- handles[0] = handle;
+- handles[1] = handle2;
+-
+- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 );
+- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret);
+-
+- SetEvent( handle );
+- SetEvent( handle2 );
+-
+- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 );
+- ok(ret == 0, "got %lu\n", ret);
+-
+- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 );
+- ok(ret == 0, "got %lu\n", ret);
+-
+- ret = WaitForSingleObject( handle2, 0 );
+- ok(ret == 0, "got %lu\n", ret);
+-
+- ResetEvent( handle );
+- SetEvent( handle2 );
+-
+- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 );
+- ok(ret == 1, "got %lu\n", ret);
+-
+- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 );
+- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret);
+-
+- SetEvent( handle );
+- SetEvent( handle2 );
+-
+- ret = WaitForMultipleObjects( 2, handles, TRUE, 0 );
+- ok(ret == 0, "got %lu\n", ret);
+-
+- ret = WaitForMultipleObjects( 2, handles, TRUE, 0 );
+- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret);
+-
+- SetEvent( handle2 );
+- ResetEvent( handle );
+-
+- ret = WaitForMultipleObjects( 2, handles, TRUE, 0 );
+- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret);
+-
+- ret = WaitForSingleObject( handle2, 0 );
+- ok(ret == 0, "got %lu\n", ret);
+-
+- handles[0] = handle2;
+- handles[1] = handle;
+- SetEvent( handle );
+- SetEvent( handle2 );
+-
+- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 );
+- ok(ret == 0, "got %lu\n", ret);
+-
+- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 );
+- ok(ret == 1, "got %lu\n", ret);
+-
+- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 );
+- ok(ret == 1, "got %lu\n", ret);
+-
+- ret = CloseHandle( handle );
+- ok(ret, "got error %lu\n", GetLastError());
+-
+- ret = CloseHandle( handle2 );
+- ok(ret, "got error %lu\n", GetLastError());
+ }
+
+ static void test_semaphore(void)
+ {
+- HANDLE handle, handle2, handles[2];
+- DWORD ret;
+- LONG prev;
+- int i;
++ HANDLE handle, handle2;
+
+ /* test case sensitivity */
+
+@@ -884,99 +667,6 @@ static void test_semaphore(void)
+ ok( GetLastError() == ERROR_INVALID_PARAMETER, "wrong error %lu\n", GetLastError());
+
+ CloseHandle( handle );
+-
+- handle = CreateSemaphoreA( NULL, 0, 5, NULL );
+- ok(!!handle, "CreateSemaphore failed: %lu\n", GetLastError());
+-
+- ret = WaitForSingleObject( handle, 0 );
+- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret);
+-
+- ret = ReleaseSemaphore( handle, 1, &prev );
+- ok(ret, "got error %lu\n", GetLastError());
+- ok(prev == 0, "got prev %ld\n", prev);
+-
+- ret = ReleaseSemaphore( handle, 1, &prev );
+- ok(ret, "got error %lu\n", GetLastError());
+- ok(prev == 1, "got prev %ld\n", prev);
+-
+- ret = ReleaseSemaphore( handle, 5, &prev );
+- ok(!ret, "got %ld\n", ret);
+- ok(GetLastError() == ERROR_TOO_MANY_POSTS, "got error %lu\n", GetLastError());
+- ok(prev == 1, "got prev %ld\n", prev);
+-
+- ret = ReleaseSemaphore( handle, 2, &prev );
+- ok(ret, "got error %lu\n", GetLastError());
+- ok(prev == 2, "got prev %ld\n", prev);
+-
+- ret = ReleaseSemaphore( handle, 1, &prev );
+- ok(ret, "got error %lu\n", GetLastError());
+- ok(prev == 4, "got prev %ld\n", prev);
+-
+- for (i = 0; i < 5; i++)
+- {
+- ret = WaitForSingleObject( handle, 0 );
+- ok(ret == 0, "got %lu\n", ret);
+- }
+-
+- ret = WaitForSingleObject( handle, 0 );
+- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret);
+-
+- handle2 = CreateSemaphoreA( NULL, 3, 5, NULL );
+- ok(!!handle2, "CreateSemaphore failed: %lu\n", GetLastError());
+-
+- ret = ReleaseSemaphore( handle2, 1, &prev );
+- ok(ret, "got error %lu\n", GetLastError());
+- ok(prev == 3, "got prev %ld\n", prev);
+-
+- for (i = 0; i < 4; i++)
+- {
+- ret = WaitForSingleObject( handle2, 0 );
+- ok(ret == 0, "got %lu\n", ret);
+- }
+-
+- ret = WaitForSingleObject( handle2, 0 );
+- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret);
+-
+- handles[0] = handle;
+- handles[1] = handle2;
+-
+- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 );
+- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret);
+-
+- ReleaseSemaphore( handle, 1, NULL );
+- ReleaseSemaphore( handle2, 1, NULL );
+-
+- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 );
+- ok(ret == 0, "got %lu\n", ret);
+-
+- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 );
+- ok(ret == 1, "got %lu\n", ret);
+-
+- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 );
+- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret);
+-
+- ReleaseSemaphore( handle, 1, NULL );
+- ReleaseSemaphore( handle2, 1, NULL );
+-
+- ret = WaitForMultipleObjects( 2, handles, TRUE, 0 );
+- ok(ret == 0, "got %lu\n", ret);
+-
+- ret = WaitForMultipleObjects( 2, handles, FALSE, 0 );
+- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret);
+-
+- ReleaseSemaphore( handle, 1, NULL );
+-
+- ret = WaitForMultipleObjects( 2, handles, TRUE, 0 );
+- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret);
+-
+- ret = WaitForSingleObject( handle, 0 );
+- ok(ret == 0, "got %lu\n", ret);
+-
+- ret = CloseHandle( handle );
+- ok(ret, "got error %lu\n", ret);
+-
+- ret = CloseHandle( handle2 );
+- ok(ret, "got error %lu\n", ret);
+ }
+
+ static void test_waitable_timer(void)
+@@ -1531,15 +1221,11 @@ static HANDLE modify_handle(HANDLE handle, DWORD modify)
+ return ULongToHandle(tmp);
+ }
+
+-#define TIMEOUT_INFINITE (((LONGLONG)0x7fffffff) << 32 | 0xffffffff)
+-
+ static void test_WaitForSingleObject(void)
+ {
+ HANDLE signaled, nonsignaled, invalid;
+- LARGE_INTEGER ntnow, ntthen;
+ LARGE_INTEGER timeout;
+ NTSTATUS status;
+- DWORD now, then;
+ DWORD ret;
+
+ signaled = CreateEventW(NULL, TRUE, TRUE, NULL);
+@@ -1624,68 +1310,6 @@ static void test_WaitForSingleObject(void)
+ status = pNtWaitForSingleObject(GetCurrentThread(), FALSE, &timeout);
+ ok(status == STATUS_TIMEOUT, "expected STATUS_TIMEOUT, got %08lx\n", status);
+
+- ret = WaitForSingleObject( signaled, 0 );
+- ok(ret == 0, "got %lu\n", ret);
+-
+- ret = WaitForSingleObject( nonsignaled, 0 );
+- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret);
+-
+- /* test that a timed wait actually does wait */
+- now = GetTickCount();
+- ret = WaitForSingleObject( nonsignaled, 100 );
+- then = GetTickCount();
+- ok(ret == WAIT_TIMEOUT, "got %lu\n", ret);
+- ok(abs((then - now) - 100) < 5, "got %lu ms\n", then - now);
+-
+- now = GetTickCount();
+- ret = WaitForSingleObject( signaled, 100 );
+- then = GetTickCount();
+- ok(ret == 0, "got %lu\n", ret);
+- ok(abs(then - now) < 5, "got %lu ms\n", then - now);
+-
+- ret = WaitForSingleObject( signaled, INFINITE );
+- ok(ret == 0, "got %lu\n", ret);
+-
+- /* test NT timeouts */
+- pNtQuerySystemTime( &ntnow );
+- timeout.QuadPart = ntnow.QuadPart + 100 * 10000;
+- status = pNtWaitForSingleObject( nonsignaled, FALSE, &timeout );
+- pNtQuerySystemTime( &ntthen );
+- ok(status == STATUS_TIMEOUT, "got %#lx\n", status);
+- ok(abs(((ntthen.QuadPart - ntnow.QuadPart) / 10000) - 100) < 5, "got %s ns\n",
+- wine_dbgstr_longlong((ntthen.QuadPart - ntnow.QuadPart) * 100));
+-
+- pNtQuerySystemTime( &ntnow );
+- timeout.QuadPart = -100 * 10000;
+- status = pNtWaitForSingleObject( nonsignaled, FALSE, &timeout );
+- pNtQuerySystemTime( &ntthen );
+- ok(status == STATUS_TIMEOUT, "got %#lx\n", status);
+- ok(abs(((ntthen.QuadPart - ntnow.QuadPart) / 10000) - 100) < 5, "got %s ns\n",
+- wine_dbgstr_longlong((ntthen.QuadPart - ntnow.QuadPart) * 100));
+-
+- status = pNtWaitForSingleObject( signaled, FALSE, NULL );
+- ok(status == 0, "got %#lx\n", status);
+-
+- timeout.QuadPart = TIMEOUT_INFINITE;
+- status = pNtWaitForSingleObject( signaled, FALSE, &timeout );
+- ok(status == 0, "got %#lx\n", status);
+-
+- pNtQuerySystemTime( &ntnow );
+- timeout.QuadPart = ntnow.QuadPart;
+- status = pNtWaitForSingleObject( nonsignaled, FALSE, &timeout );
+- pNtQuerySystemTime( &ntthen );
+- ok(status 
== STATUS_TIMEOUT, "got %#lx\n", status); +- ok(abs((ntthen.QuadPart - ntnow.QuadPart) / 10000) < 5, "got %s ns\n", +- wine_dbgstr_longlong((ntthen.QuadPart - ntnow.QuadPart) * 100)); +- +- pNtQuerySystemTime( &ntnow ); +- timeout.QuadPart = ntnow.QuadPart - 100 * 10000; +- status = pNtWaitForSingleObject( nonsignaled, FALSE, &timeout ); +- pNtQuerySystemTime( &ntthen ); +- ok(status == STATUS_TIMEOUT, "got %#lx\n", status); +- ok(abs((ntthen.QuadPart - ntnow.QuadPart) / 10000) < 5, "got %s ns\n", +- wine_dbgstr_longlong((ntthen.QuadPart - ntnow.QuadPart) * 100)); +- + CloseHandle(signaled); + CloseHandle(nonsignaled); + } +@@ -3351,7 +2975,6 @@ START_TEST(sync) + pTryAcquireSRWLockShared = (void *)GetProcAddress(hdll, "TryAcquireSRWLockShared"); + pNtAllocateVirtualMemory = (void *)GetProcAddress(hntdll, "NtAllocateVirtualMemory"); + pNtFreeVirtualMemory = (void *)GetProcAddress(hntdll, "NtFreeVirtualMemory"); +- pNtQuerySystemTime = (void *)GetProcAddress(hntdll, "NtQuerySystemTime"); + pNtWaitForSingleObject = (void *)GetProcAddress(hntdll, "NtWaitForSingleObject"); + pNtWaitForMultipleObjects = (void *)GetProcAddress(hntdll, "NtWaitForMultipleObjects"); + pRtlInterlockedPushListSList = (void *)GetProcAddress(hntdll, "RtlInterlockedPushListSList"); +diff --git a/dlls/ntdll/Makefile.in b/dlls/ntdll/Makefile.in +index 4629457..f71f79b 100644 +--- a/dlls/ntdll/Makefile.in ++++ b/dlls/ntdll/Makefile.in +@@ -48,7 +48,6 @@ SOURCES = \ + unix/cdrom.c \ + unix/debug.c \ + unix/env.c \ +- unix/esync.c \ + unix/file.c \ + unix/loader.c \ + unix/loadorder.c \ +diff --git a/dlls/ntdll/unix/esync.c b/dlls/ntdll/unix/esync.c +deleted file mode 100644 +index edfeb03..0000000 +--- a/dlls/ntdll/unix/esync.c ++++ /dev/null +@@ -1,1337 +0,0 @@ +-/* +- * eventfd-based synchronization objects +- * +- * Copyright (C) 2018 Zebediah Figura +- * +- * This library is free software; you can redistribute it and/or +- * modify it under the terms of the GNU Lesser General Public +- * License as published by the Free Software Foundation; either +- * version 2.1 of the License, or (at your option) any later version. +- * +- * This library is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * Lesser General Public License for more details. 
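/* The deleted tests above exercise the NT timeout convention: a
 * LARGE_INTEGER with a negative QuadPart is a relative timeout in 100ns
 * ticks, a non-negative QuadPart is an absolute NtQuerySystemTime() value,
 * and 0x7fffffffffffffff (TIMEOUT_INFINITE above) never expires. A minimal
 * self-contained sketch of building a relative timeout; the helper name
 * nt_relative_timeout_ms is illustrative, not a Wine API: */

#include <stdint.h>

#define TICKSPERMSEC 10000 /* 100ns ticks per millisecond */

/* Build a relative NT timeout (e.g. for NtWaitForSingleObject) from ms. */
static int64_t nt_relative_timeout_ms( unsigned int ms )
{
    return -((int64_t)ms * TICKSPERMSEC); /* negative QuadPart = relative */
}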
+- * +- * You should have received a copy of the GNU Lesser General Public +- * License along with this library; if not, write to the Free Software +- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA +- */ +- +-#if 0 +-#pragma makedep unix +-#endif +- +-#ifndef _GNU_SOURCE +-#define _GNU_SOURCE +-#endif +- +-#include "config.h" +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#ifdef HAVE_SYS_STAT_H +-# include +-#endif +-#include +-#include +-#include +- +-#include "ntstatus.h" +-#define WIN32_NO_STATUS +-#include "windef.h" +-#include "winternl.h" +-#include "wine/server.h" +-#include "wine/debug.h" +- +-#include "unix_private.h" +-#include "esync.h" +- +-WINE_DEFAULT_DEBUG_CHANNEL(esync); +- +-int do_esync(void) +-{ +-#ifdef HAVE_SYS_EVENTFD_H +- static int do_esync_cached = -1; +- +- if (do_esync_cached == -1) +- do_esync_cached = getenv("WINEESYNC") && atoi(getenv("WINEESYNC")); +- +- return do_esync_cached; +-#else +- static int once; +- if (!once++) +- FIXME("eventfd not supported on this platform.\n"); +- return 0; +-#endif +-} +- +-struct esync +-{ +- enum esync_type type; +- int fd; +- void *shm; +-}; +- +-struct semaphore +-{ +- int max; +- int count; +-}; +-C_ASSERT(sizeof(struct semaphore) == 8); +- +-struct mutex +-{ +- DWORD tid; +- int count; /* recursion count */ +-}; +-C_ASSERT(sizeof(struct mutex) == 8); +- +-struct event +-{ +- int signaled; +- int locked; +-}; +-C_ASSERT(sizeof(struct event) == 8); +- +-static char shm_name[29]; +-static int shm_fd; +-static void **shm_addrs; +-static int shm_addrs_size; /* length of the allocated shm_addrs array */ +-static long pagesize; +- +-static pthread_mutex_t shm_addrs_mutex = PTHREAD_MUTEX_INITIALIZER; +- +-static void *get_shm( unsigned int idx ) +-{ +- int entry = (idx * 8) / pagesize; +- int offset = (idx * 8) % pagesize; +- void *ret; +- +- pthread_mutex_lock( &shm_addrs_mutex ); +- +- if (entry >= shm_addrs_size) +- { +- int new_size = max(shm_addrs_size * 2, entry + 1); +- +- if (!(shm_addrs = realloc( shm_addrs, new_size * sizeof(shm_addrs[0]) ))) +- ERR("Failed to grow shm_addrs array to size %d.\n", shm_addrs_size); +- memset( shm_addrs + shm_addrs_size, 0, (new_size - shm_addrs_size) * sizeof(shm_addrs[0]) ); +- shm_addrs_size = new_size; +- } +- +- if (!shm_addrs[entry]) +- { +- void *addr = mmap( NULL, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, entry * pagesize ); +- if (addr == (void *)-1) +- ERR("Failed to map page %d (offset %#lx).\n", entry, entry * pagesize); +- +- TRACE("Mapping page %d at %p.\n", entry, addr); +- +- if (InterlockedCompareExchangePointer( &shm_addrs[entry], addr, 0 )) +- munmap( addr, pagesize ); /* someone beat us to it */ +- } +- +- ret = (void *)((unsigned long)shm_addrs[entry] + offset); +- +- pthread_mutex_unlock( &shm_addrs_mutex ); +- +- return ret; +-} +- +-/* We'd like lookup to be fast. To that end, we use a static list indexed by handle. +- * This is copied and adapted from the fd cache code. 
*/ +- +-#define ESYNC_LIST_BLOCK_SIZE (65536 / sizeof(struct esync)) +-#define ESYNC_LIST_ENTRIES 256 +- +-static struct esync *esync_list[ESYNC_LIST_ENTRIES]; +-static struct esync esync_list_initial_block[ESYNC_LIST_BLOCK_SIZE]; +- +-static inline UINT_PTR handle_to_index( HANDLE handle, UINT_PTR *entry ) +-{ +- UINT_PTR idx = (((UINT_PTR)handle) >> 2) - 1; +- *entry = idx / ESYNC_LIST_BLOCK_SIZE; +- return idx % ESYNC_LIST_BLOCK_SIZE; +-} +- +-static struct esync *add_to_list( HANDLE handle, enum esync_type type, int fd, void *shm ) +-{ +- UINT_PTR entry, idx = handle_to_index( handle, &entry ); +- +- if (entry >= ESYNC_LIST_ENTRIES) +- { +- FIXME( "too many allocated handles, not caching %p\n", handle ); +- return FALSE; +- } +- +- if (!esync_list[entry]) /* do we need to allocate a new block of entries? */ +- { +- if (!entry) esync_list[0] = esync_list_initial_block; +- else +- { +- void *ptr = anon_mmap_alloc( ESYNC_LIST_BLOCK_SIZE * sizeof(struct esync), +- PROT_READ | PROT_WRITE ); +- if (ptr == MAP_FAILED) return FALSE; +- esync_list[entry] = ptr; +- } +- } +- +- if (!InterlockedCompareExchange( (int *)&esync_list[entry][idx].type, type, 0 )) +- { +- esync_list[entry][idx].fd = fd; +- esync_list[entry][idx].shm = shm; +- } +- return &esync_list[entry][idx]; +-} +- +-static struct esync *get_cached_object( HANDLE handle ) +-{ +- UINT_PTR entry, idx = handle_to_index( handle, &entry ); +- +- if (entry >= ESYNC_LIST_ENTRIES || !esync_list[entry]) return NULL; +- if (!esync_list[entry][idx].type) return NULL; +- +- return &esync_list[entry][idx]; +-} +- +-/* Gets an object. This is either a proper esync object (i.e. an event, +- * semaphore, etc. created using create_esync) or a generic synchronizable +- * server-side object which the server will signal (e.g. a process, thread, +- * message queue, etc.) */ +-static NTSTATUS get_object( HANDLE handle, struct esync **obj ) +-{ +- NTSTATUS ret = STATUS_SUCCESS; +- enum esync_type type = 0; +- unsigned int shm_idx = 0; +- obj_handle_t fd_handle; +- sigset_t sigset; +- int fd = -1; +- +- if ((*obj = get_cached_object( handle ))) return STATUS_SUCCESS; +- +- if ((INT_PTR)handle < 0) +- { +- /* We can deal with pseudo-handles, but it's just easier this way */ +- return STATUS_NOT_IMPLEMENTED; +- } +- +- if (!handle) +- { +- /* Shadow of the Tomb Raider really likes passing in NULL handles to +- * various functions. Concerning, but let's avoid a server call. */ +- return STATUS_INVALID_HANDLE; +- } +- +- /* We need to try grabbing it from the server. */ +- server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); +- if (!(*obj = get_cached_object( handle ))) +- { +- SERVER_START_REQ( get_esync_fd ) +- { +- req->handle = wine_server_obj_handle( handle ); +- if (!(ret = wine_server_call( req ))) +- { +- type = reply->type; +- shm_idx = reply->shm_idx; +- fd = receive_fd( &fd_handle ); +- assert( wine_server_ptr_handle(fd_handle) == handle ); +- } +- } +- SERVER_END_REQ; +- } +- server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); +- +- if (*obj) +- { +- /* We managed to grab it while in the CS; return it. */ +- return STATUS_SUCCESS; +- } +- +- if (ret) +- { +- WARN("Failed to retrieve fd for handle %p, status %#x.\n", handle, ret); +- *obj = NULL; +- return ret; +- } +- +- TRACE("Got fd %d for handle %p.\n", fd, handle); +- +- *obj = add_to_list( handle, type, fd, shm_idx ? 
get_shm( shm_idx ) : 0 ); +- return ret; +-} +- +-NTSTATUS esync_close( HANDLE handle ) +-{ +- UINT_PTR entry, idx = handle_to_index( handle, &entry ); +- +- TRACE("%p.\n", handle); +- +- if (entry < ESYNC_LIST_ENTRIES && esync_list[entry]) +- { +- if (InterlockedExchange((int *)&esync_list[entry][idx].type, 0)) +- { +- close( esync_list[entry][idx].fd ); +- return STATUS_SUCCESS; +- } +- } +- +- return STATUS_INVALID_HANDLE; +-} +- +-static NTSTATUS create_esync( enum esync_type type, HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr, int initval, int max ) +-{ +- NTSTATUS ret; +- data_size_t len; +- struct object_attributes *objattr; +- obj_handle_t fd_handle; +- unsigned int shm_idx; +- sigset_t sigset; +- int fd; +- +- if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; +- +- /* We have to synchronize on the fd cache CS so that our calls to +- * receive_fd don't race with theirs. */ +- server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); +- SERVER_START_REQ( create_esync ) +- { +- req->access = access; +- req->initval = initval; +- req->type = type; +- req->max = max; +- wine_server_add_data( req, objattr, len ); +- ret = wine_server_call( req ); +- if (!ret || ret == STATUS_OBJECT_NAME_EXISTS) +- { +- *handle = wine_server_ptr_handle( reply->handle ); +- type = reply->type; +- shm_idx = reply->shm_idx; +- fd = receive_fd( &fd_handle ); +- assert( wine_server_ptr_handle(fd_handle) == *handle ); +- } +- } +- SERVER_END_REQ; +- server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); +- +- if (!ret || ret == STATUS_OBJECT_NAME_EXISTS) +- { +- add_to_list( *handle, type, fd, shm_idx ? get_shm( shm_idx ) : 0 ); +- TRACE("-> handle %p, fd %d.\n", *handle, fd); +- } +- +- free( objattr ); +- return ret; +-} +- +-static NTSTATUS open_esync( enum esync_type type, HANDLE *handle, +- ACCESS_MASK access, const OBJECT_ATTRIBUTES *attr ) +-{ +- NTSTATUS ret; +- obj_handle_t fd_handle; +- unsigned int shm_idx; +- sigset_t sigset; +- int fd; +- +- server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); +- SERVER_START_REQ( open_esync ) +- { +- req->access = access; +- req->attributes = attr->Attributes; +- req->rootdir = wine_server_obj_handle( attr->RootDirectory ); +- req->type = type; +- if (attr->ObjectName) +- wine_server_add_data( req, attr->ObjectName->Buffer, attr->ObjectName->Length ); +- if (!(ret = wine_server_call( req ))) +- { +- *handle = wine_server_ptr_handle( reply->handle ); +- type = reply->type; +- shm_idx = reply->shm_idx; +- fd = receive_fd( &fd_handle ); +- assert( wine_server_ptr_handle(fd_handle) == *handle ); +- } +- } +- SERVER_END_REQ; +- server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); +- +- if (!ret) +- { +- add_to_list( *handle, type, fd, shm_idx ? get_shm( shm_idx ) : 0 ); +- +- TRACE("-> handle %p, fd %d.\n", *handle, fd); +- } +- return ret; +-} +- +-extern NTSTATUS esync_create_semaphore(HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr, LONG initial, LONG max) +-{ +- TRACE("name %s, initial %d, max %d.\n", +- attr ? 
debugstr_us(attr->ObjectName) : "", initial, max); +- +- return create_esync( ESYNC_SEMAPHORE, handle, access, attr, initial, max ); +-} +- +-NTSTATUS esync_open_semaphore( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr ) +-{ +- TRACE("name %s.\n", debugstr_us(attr->ObjectName)); +- +- return open_esync( ESYNC_SEMAPHORE, handle, access, attr ); +-} +- +-NTSTATUS esync_release_semaphore( HANDLE handle, ULONG count, ULONG *prev ) +-{ +- struct esync *obj; +- struct semaphore *semaphore; +- uint64_t count64 = count; +- ULONG current; +- NTSTATUS ret; +- +- TRACE("%p, %d, %p.\n", handle, count, prev); +- +- if ((ret = get_object( handle, &obj))) return ret; +- semaphore = obj->shm; +- +- do +- { +- current = semaphore->count; +- +- if (count + current > semaphore->max) +- return STATUS_SEMAPHORE_LIMIT_EXCEEDED; +- } while (InterlockedCompareExchange( &semaphore->count, count + current, current ) != current); +- +- if (prev) *prev = current; +- +- /* We don't have to worry about a race between increasing the count and +- * write(). The fact that we were able to increase the count means that we +- * have permission to actually write that many releases to the semaphore. */ +- +- if (write( obj->fd, &count64, sizeof(count64) ) == -1) +- return errno_to_status( errno ); +- +- return STATUS_SUCCESS; +-} +- +-NTSTATUS esync_query_semaphore( HANDLE handle, void *info, ULONG *ret_len ) +-{ +- struct esync *obj; +- struct semaphore *semaphore; +- SEMAPHORE_BASIC_INFORMATION *out = info; +- NTSTATUS ret; +- +- TRACE("handle %p, info %p, ret_len %p.\n", handle, info, ret_len); +- +- if ((ret = get_object( handle, &obj ))) return ret; +- semaphore = obj->shm; +- +- out->CurrentCount = semaphore->count; +- out->MaximumCount = semaphore->max; +- if (ret_len) *ret_len = sizeof(*out); +- +- return STATUS_SUCCESS; +-} +- +-NTSTATUS esync_create_event( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr, EVENT_TYPE event_type, BOOLEAN initial ) +-{ +- enum esync_type type = (event_type == SynchronizationEvent ? ESYNC_AUTO_EVENT : ESYNC_MANUAL_EVENT); +- +- TRACE("name %s, %s-reset, initial %d.\n", +- attr ? debugstr_us(attr->ObjectName) : "", +- event_type == NotificationEvent ? "manual" : "auto", initial); +- +- return create_esync( type, handle, access, attr, initial, 0 ); +-} +- +-NTSTATUS esync_open_event( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr ) +-{ +- TRACE("name %s.\n", debugstr_us(attr->ObjectName)); +- +- return open_esync( ESYNC_AUTO_EVENT, handle, access, attr ); /* doesn't matter which */ +-} +- +-static inline void small_pause(void) +-{ +-#ifdef __i386__ +- __asm__ __volatile__( "rep;nop" : : : "memory" ); +-#else +- __asm__ __volatile__( "" : : : "memory" ); +-#endif +-} +- +-/* Manual-reset events are actually racier than other objects in terms of shm +- * state. With other objects, races don't matter, because we only treat the shm +- * state as a hint that lets us skip poll()—we still have to read(). But with +- * manual-reset events we don't, which means that the shm state can be out of +- * sync with the actual state. +- * +- * In general we shouldn't have to worry about races between modifying the +- * event and waiting on it. If the state changes while we're waiting, it's +- * equally plausible that we caught it before or after the state changed. +- * However, we can have races between SetEvent() and ResetEvent(), so that the +- * event has inconsistent internal state. 
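/* For concreteness, a condensed sketch of the spinlock that the comment
 * below goes on to describe and that esync_set_event()/esync_reset_event()
 * inline, reusing the struct event layout and small_pause() defined above;
 * the lock/unlock helper names are illustrative only: */

static void event_spin_lock( struct event *event )
{
    /* spin until we are the thread that flips locked from 0 to 1 */
    while (InterlockedCompareExchange( &event->locked, 1, 0 ))
        small_pause();
}

static void event_spin_unlock( struct event *event )
{
    event->locked = 0; /* plain store, matching how the code here releases it */
}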
+- * +- * To solve this we have to use the other field to lock the event. Currently +- * this is implemented as a spinlock, but I'm not sure if a futex might be +- * better. I'm also not sure if it's possible to obviate locking by arranging +- * writes and reads in a certain way. +- * +- * Note that we don't have to worry about locking in esync_wait_objects(). +- * There's only two general patterns: +- * +- * WaitFor() SetEvent() +- * ------------------------- +- * read() +- * signaled = 0 +- * signaled = 1 +- * write() +- * ------------------------- +- * read() +- * signaled = 1 +- * signaled = 0 +- * +- * ------------------------- +- * +- * That is, if SetEvent() tries to signal the event before WaitFor() resets its +- * signaled state, it won't bother trying to write(), and then the signaled +- * state will be reset, so the result is a consistent non-signaled event. +- * There's several variations to this pattern but all of them are protected in +- * the same way. Note however this is why we have to use interlocked_xchg() +- * event inside of the lock. +- */ +- +-/* Removing this spinlock is harder than it looks. esync_wait_objects() can +- * deal with inconsistent state well enough, and a race between SetEvent() and +- * ResetEvent() gives us license to yield either result as long as we act +- * consistently, but that's not enough. Notably, esync_wait_objects() should +- * probably act like a fence, so that the second half of esync_set_event() does +- * not seep past a subsequent reset. That's one problem, but no guarantee there +- * aren't others. */ +- +-NTSTATUS esync_set_event( HANDLE handle ) +-{ +- static const uint64_t value = 1; +- struct esync *obj; +- struct event *event; +- NTSTATUS ret; +- +- TRACE("%p.\n", handle); +- +- if ((ret = get_object( handle, &obj ))) return ret; +- event = obj->shm; +- +- if (obj->type == ESYNC_MANUAL_EVENT) +- { +- /* Acquire the spinlock. */ +- while (InterlockedCompareExchange( &event->locked, 1, 0 )) +- small_pause(); +- } +- +- /* For manual-reset events, as long as we're in a lock, we can take the +- * optimization of only calling write() if the event wasn't already +- * signaled. +- * +- * For auto-reset events, esync_wait_objects() must grab the kernel object. +- * Thus if we got into a race so that the shm state is signaled but the +- * eventfd is unsignaled (i.e. reset shm, set shm, set fd, reset fd), we +- * *must* signal the fd now, or any waiting threads will never wake up. */ +- +- if (!InterlockedExchange( &event->signaled, 1 ) || obj->type == ESYNC_AUTO_EVENT) +- { +- if (write( obj->fd, &value, sizeof(value) ) == -1) +- ERR("write: %s\n", strerror(errno)); +- } +- +- if (obj->type == ESYNC_MANUAL_EVENT) +- { +- /* Release the spinlock. */ +- event->locked = 0; +- } +- +- return STATUS_SUCCESS; +-} +- +-NTSTATUS esync_reset_event( HANDLE handle ) +-{ +- uint64_t value; +- struct esync *obj; +- struct event *event; +- NTSTATUS ret; +- +- TRACE("%p.\n", handle); +- +- if ((ret = get_object( handle, &obj ))) return ret; +- event = obj->shm; +- +- if (obj->type == ESYNC_MANUAL_EVENT) +- { +- /* Acquire the spinlock. */ +- while (InterlockedCompareExchange( &event->locked, 1, 0 )) +- small_pause(); +- } +- +- /* For manual-reset events, as long as we're in a lock, we can take the +- * optimization of only calling read() if the event was already signaled. +- * +- * For auto-reset events, we have no guarantee that the previous "signaled" +- * state is actually correct. 
We need to leave both states unsignaled after +- * leaving this function, so we always have to read(). */ +- if (InterlockedExchange( &event->signaled, 0 ) || obj->type == ESYNC_AUTO_EVENT) +- { +- if (read( obj->fd, &value, sizeof(value) ) == -1 && errno != EWOULDBLOCK && errno != EAGAIN) +- { +- ERR("read: %s\n", strerror(errno)); +- } +- } +- +- if (obj->type == ESYNC_MANUAL_EVENT) +- { +- /* Release the spinlock. */ +- event->locked = 0; +- } +- +- return STATUS_SUCCESS; +-} +- +-NTSTATUS esync_pulse_event( HANDLE handle ) +-{ +- uint64_t value = 1; +- struct esync *obj; +- NTSTATUS ret; +- +- TRACE("%p.\n", handle); +- +- if ((ret = get_object( handle, &obj ))) return ret; +- +- /* This isn't really correct; an application could miss the write. +- * Unfortunately we can't really do much better. Fortunately this is rarely +- * used (and publicly deprecated). */ +- if (write( obj->fd, &value, sizeof(value) ) == -1) +- return errno_to_status( errno ); +- +- /* Try to give other threads a chance to wake up. Hopefully erring on this +- * side is the better thing to do... */ +- NtYieldExecution(); +- +- read( obj->fd, &value, sizeof(value) ); +- +- return STATUS_SUCCESS; +-} +- +-NTSTATUS esync_query_event( HANDLE handle, void *info, ULONG *ret_len ) +-{ +- struct esync *obj; +- EVENT_BASIC_INFORMATION *out = info; +- struct pollfd fd; +- NTSTATUS ret; +- +- TRACE("handle %p, info %p, ret_len %p.\n", handle, info, ret_len); +- +- if ((ret = get_object( handle, &obj ))) return ret; +- +- fd.fd = obj->fd; +- fd.events = POLLIN; +- out->EventState = poll( &fd, 1, 0 ); +- out->EventType = (obj->type == ESYNC_AUTO_EVENT ? SynchronizationEvent : NotificationEvent); +- if (ret_len) *ret_len = sizeof(*out); +- +- return STATUS_SUCCESS; +-} +- +-NTSTATUS esync_create_mutex( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr, BOOLEAN initial ) +-{ +- TRACE("name %s, initial %d.\n", +- attr ? debugstr_us(attr->ObjectName) : "", initial); +- +- return create_esync( ESYNC_MUTEX, handle, access, attr, initial ? 0 : 1, 0 ); +-} +- +-NTSTATUS esync_open_mutex( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr ) +-{ +- TRACE("name %s.\n", debugstr_us(attr->ObjectName)); +- +- return open_esync( ESYNC_MUTEX, handle, access, attr ); +-} +- +-NTSTATUS esync_release_mutex( HANDLE *handle, LONG *prev ) +-{ +- struct esync *obj; +- struct mutex *mutex; +- static const uint64_t value = 1; +- NTSTATUS ret; +- +- TRACE("%p, %p.\n", handle, prev); +- +- if ((ret = get_object( handle, &obj ))) return ret; +- mutex = obj->shm; +- +- /* This is thread-safe, because the only thread that can change the tid to +- * or from our tid is ours. */ +- if (mutex->tid != GetCurrentThreadId()) return STATUS_MUTANT_NOT_OWNED; +- +- if (prev) *prev = mutex->count; +- +- mutex->count--; +- +- if (!mutex->count) +- { +- /* This is also thread-safe, as long as signaling the file is the last +- * thing we do. Other threads don't care about the tid if it isn't +- * theirs. 
*/ +- mutex->tid = 0; +- +- if (write( obj->fd, &value, sizeof(value) ) == -1) +- return errno_to_status( errno ); +- } +- +- return STATUS_SUCCESS; +-} +- +-NTSTATUS esync_query_mutex( HANDLE handle, void *info, ULONG *ret_len ) +-{ +- struct esync *obj; +- struct mutex *mutex; +- MUTANT_BASIC_INFORMATION *out = info; +- NTSTATUS ret; +- +- TRACE("handle %p, info %p, ret_len %p.\n", handle, info, ret_len); +- +- if ((ret = get_object( handle, &obj ))) return ret; +- mutex = obj->shm; +- +- out->CurrentCount = 1 - mutex->count; +- out->OwnedByCaller = (mutex->tid == GetCurrentThreadId()); +- out->AbandonedState = (mutex->tid == ~0); +- if (ret_len) *ret_len = sizeof(*out); +- +- return STATUS_SUCCESS; +-} +- +-#define TICKSPERSEC 10000000 +-#define TICKSPERMSEC 10000 +- +-static LONGLONG update_timeout( ULONGLONG end ) +-{ +- LARGE_INTEGER now; +- LONGLONG timeleft; +- +- NtQuerySystemTime( &now ); +- timeleft = end - now.QuadPart; +- if (timeleft < 0) timeleft = 0; +- return timeleft; +-} +- +-static int do_poll( struct pollfd *fds, nfds_t nfds, ULONGLONG *end ) +-{ +- int ret; +- +- do +- { +- if (end) +- { +- LONGLONG timeleft = update_timeout( *end ); +- +-#ifdef HAVE_PPOLL +- /* We use ppoll() if available since the time granularity is better. */ +- struct timespec tmo_p; +- tmo_p.tv_sec = timeleft / (ULONGLONG)TICKSPERSEC; +- tmo_p.tv_nsec = (timeleft % TICKSPERSEC) * 100; +- ret = ppoll( fds, nfds, &tmo_p, NULL ); +-#else +- ret = poll( fds, nfds, timeleft / TICKSPERMSEC ); +-#endif +- } +- else +- ret = poll( fds, nfds, -1 ); +- +- /* If we receive EINTR we were probably suspended (SIGUSR1), possibly for a +- * system APC. The right thing to do is just try again. */ +- } while (ret < 0 && errno == EINTR); +- +- return ret; +-} +- +-/* Return TRUE if abandoned. */ +-static BOOL update_grabbed_object( struct esync *obj ) +-{ +- BOOL ret = FALSE; +- +- if (obj->type == ESYNC_MUTEX) +- { +- struct mutex *mutex = obj->shm; +- /* We don't have to worry about a race between this and read(); the +- * fact that we grabbed it means the count is now zero, so nobody else +- * can (and the only thread that can release it is us). */ +- if (mutex->tid == ~0) +- ret = TRUE; +- mutex->tid = GetCurrentThreadId(); +- mutex->count++; +- } +- else if (obj->type == ESYNC_SEMAPHORE) +- { +- struct semaphore *semaphore = obj->shm; +- /* We don't have to worry about a race between this and read(); the +- * fact that we were able to grab it at all means the count is nonzero, +- * and if someone else grabbed it then the count must have been >= 2, +- * etc. */ +- InterlockedExchangeAdd( &semaphore->count, -1 ); +- } +- else if (obj->type == ESYNC_AUTO_EVENT) +- { +- struct event *event = obj->shm; +- /* We don't have to worry about a race between this and read(), since +- * this is just a hint, and the real state is in the kernel object. +- * This might already be 0, but that's okay! */ +- event->signaled = 0; +- } +- +- return ret; +-} +- +-/* A value of STATUS_NOT_IMPLEMENTED returned from this function means that we +- * need to delegate to server_select(). 
*/ +-static NTSTATUS __esync_wait_objects( DWORD count, const HANDLE *handles, BOOLEAN wait_any, +- BOOLEAN alertable, const LARGE_INTEGER *timeout ) +-{ +- static const LARGE_INTEGER zero; +- +- struct esync *objs[MAXIMUM_WAIT_OBJECTS]; +- struct pollfd fds[MAXIMUM_WAIT_OBJECTS + 1]; +- int has_esync = 0, has_server = 0; +- BOOL msgwait = FALSE; +- LONGLONG timeleft; +- LARGE_INTEGER now; +- DWORD pollcount; +- ULONGLONG end; +- int64_t value; +- ssize_t size; +- int i, j, ret; +- +- /* Grab the APC fd if we don't already have it. */ +- if (alertable && ntdll_get_thread_data()->esync_apc_fd == -1) +- { +- obj_handle_t fd_handle; +- sigset_t sigset; +- int fd = -1; +- +- server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); +- SERVER_START_REQ( get_esync_apc_fd ) +- { +- if (!(ret = wine_server_call( req ))) +- { +- fd = receive_fd( &fd_handle ); +- assert( fd_handle == GetCurrentThreadId() ); +- } +- } +- SERVER_END_REQ; +- server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); +- +- ntdll_get_thread_data()->esync_apc_fd = fd; +- } +- +- NtQuerySystemTime( &now ); +- if (timeout) +- { +- if (timeout->QuadPart == TIMEOUT_INFINITE) +- timeout = NULL; +- else if (timeout->QuadPart >= 0) +- end = timeout->QuadPart; +- else +- end = now.QuadPart - timeout->QuadPart; +- } +- +- for (i = 0; i < count; i++) +- { +- ret = get_object( handles[i], &objs[i] ); +- if (ret == STATUS_SUCCESS) +- has_esync = 1; +- else if (ret == STATUS_NOT_IMPLEMENTED) +- has_server = 1; +- else +- return ret; +- } +- +- if (objs[count - 1] && objs[count - 1]->type == ESYNC_QUEUE) +- msgwait = TRUE; +- +- if (has_esync && has_server) +- FIXME("Can't wait on esync and server objects at the same time!\n"); +- else if (has_server) +- return STATUS_NOT_IMPLEMENTED; +- +- if (TRACE_ON(esync)) +- { +- TRACE("Waiting for %s of %d handles:", wait_any ? "any" : "all", count); +- for (i = 0; i < count; i++) +- TRACE(" %p", handles[i]); +- +- if (msgwait) +- TRACE(" or driver events"); +- if (alertable) +- TRACE(", alertable"); +- +- if (!timeout) +- TRACE(", timeout = INFINITE.\n"); +- else +- { +- timeleft = update_timeout( end ); +- TRACE(", timeout = %ld.%07ld sec.\n", +- (long) timeleft / TICKSPERSEC, (long) timeleft % TICKSPERSEC); +- } +- } +- +- if (wait_any || count == 1) +- { +- /* Try to check objects now, so we can obviate poll() at least. 
*/ +- for (i = 0; i < count; i++) +- { +- struct esync *obj = objs[i]; +- +- if (obj) +- { +- switch (obj->type) +- { +- case ESYNC_MUTEX: +- { +- struct mutex *mutex = obj->shm; +- +- if (mutex->tid == GetCurrentThreadId()) +- { +- TRACE("Woken up by handle %p [%d].\n", handles[i], i); +- mutex->count++; +- return i; +- } +- else if (!mutex->count) +- { +- if ((size = read( obj->fd, &value, sizeof(value) )) == sizeof(value)) +- { +- if (mutex->tid == ~0) +- { +- TRACE("Woken up by abandoned mutex %p [%d].\n", handles[i], i); +- i += STATUS_ABANDONED_WAIT_0; +- } +- else +- TRACE("Woken up by handle %p [%d].\n", handles[i], i); +- mutex->tid = GetCurrentThreadId(); +- mutex->count++; +- return i; +- } +- } +- break; +- } +- case ESYNC_SEMAPHORE: +- { +- struct semaphore *semaphore = obj->shm; +- +- if (semaphore->count) +- { +- if ((size = read( obj->fd, &value, sizeof(value) )) == sizeof(value)) +- { +- TRACE("Woken up by handle %p [%d].\n", handles[i], i); +- InterlockedDecrement( &semaphore->count ); +- return i; +- } +- } +- break; +- } +- case ESYNC_AUTO_EVENT: +- { +- struct event *event = obj->shm; +- +- if (event->signaled) +- { +- if ((size = read( obj->fd, &value, sizeof(value) )) == sizeof(value)) +- { +- TRACE("Woken up by handle %p [%d].\n", handles[i], i); +- event->signaled = 0; +- return i; +- } +- } +- break; +- } +- case ESYNC_MANUAL_EVENT: +- { +- struct event *event = obj->shm; +- +- if (event->signaled) +- { +- TRACE("Woken up by handle %p [%d].\n", handles[i], i); +- return i; +- } +- break; +- } +- case ESYNC_AUTO_SERVER: +- case ESYNC_MANUAL_SERVER: +- case ESYNC_QUEUE: +- /* We can't wait on any of these. Fortunately I don't think +- * they'll ever be uncontended anyway (at least, they won't be +- * performance-critical). */ +- break; +- } +- } +- +- fds[i].fd = obj ? obj->fd : -1; +- fds[i].events = POLLIN; +- } +- if (alertable) +- { +- fds[i].fd = ntdll_get_thread_data()->esync_apc_fd; +- fds[i].events = POLLIN; +- i++; +- } +- pollcount = i; +- +- while (1) +- { +- ret = do_poll( fds, pollcount, timeout ? &end : NULL ); +- if (ret > 0) +- { +- /* We must check this first! The server may set an event that +- * we're waiting on, but we need to return STATUS_USER_APC. */ +- if (alertable) +- { +- if (fds[pollcount - 1].revents & POLLIN) +- goto userapc; +- } +- +- /* Find out which object triggered the wait. */ +- for (i = 0; i < count; i++) +- { +- struct esync *obj = objs[i]; +- +- if (fds[i].revents & (POLLERR | POLLHUP | POLLNVAL)) +- { +- ERR("Polling on fd %d returned %#x.\n", fds[i].fd, fds[i].revents); +- return STATUS_INVALID_HANDLE; +- } +- +- if (obj) +- { +- if (obj->type == ESYNC_MANUAL_EVENT +- || obj->type == ESYNC_MANUAL_SERVER +- || obj->type == ESYNC_QUEUE) +- { +- /* Don't grab the object, just check if it's signaled. */ +- if (fds[i].revents & POLLIN) +- { +- TRACE("Woken up by handle %p [%d].\n", handles[i], i); +- return i; +- } +- } +- else +- { +- if ((size = read( fds[i].fd, &value, sizeof(value) )) == sizeof(value)) +- { +- /* We found our object. */ +- TRACE("Woken up by handle %p [%d].\n", handles[i], i); +- if (update_grabbed_object( obj )) +- return STATUS_ABANDONED_WAIT_0 + i; +- return i; +- } +- } +- } +- } +- +- /* If we got here, someone else stole (or reset, etc.) whatever +- * we were waiting for. So keep waiting. */ +- NtQuerySystemTime( &now ); +- } +- else +- goto err; +- } +- } +- else +- { +- /* Wait-all is a little trickier to implement correctly. Fortunately, +- * it's not as common. 
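/* A minimal self-contained sketch of the "grab" step used throughout the
 * wait-any path above: esync eventfds are created O_NONBLOCK, so read()
 * either atomically consumes one 8-byte signal or fails with
 * EAGAIN/EWOULDBLOCK, in which case the waiter falls back to poll(). The
 * helper name try_grab_eventfd is illustrative only: */

#include <stdint.h>
#include <unistd.h>

static int try_grab_eventfd( int fd )
{
    uint64_t value;
    /* nonzero if we consumed a signal, zero if the object was not signaled */
    return read( fd, &value, sizeof(value) ) == sizeof(value);
}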
+- * +- * The idea is basically just to wait in sequence on every object in the +- * set. Then when we're done, try to grab them all in a tight loop. If +- * that fails, release any resources we've grabbed (and yes, we can +- * reliably do this—it's just mutexes and semaphores that we have to +- * put back, and in both cases we just put back 1), and if any of that +- * fails we start over. +- * +- * What makes this inherently bad is that we might temporarily grab a +- * resource incorrectly. Hopefully it'll be quick (and hey, it won't +- * block on wineserver) so nobody will notice. Besides, consider: if +- * object A becomes signaled but someone grabs it before we can grab it +- * and everything else, then they could just as well have grabbed it +- * before it became signaled. Similarly if object A was signaled and we +- * were blocking on object B, then B becomes available and someone grabs +- * A before we can, then they might have grabbed A before B became +- * signaled. In either case anyone who tries to wait on A or B will be +- * waiting for an instant while we put things back. */ +- +- while (1) +- { +-tryagain: +- /* First step: try to poll on each object in sequence. */ +- fds[0].events = POLLIN; +- pollcount = 1; +- if (alertable) +- { +- /* We also need to wait on APCs. */ +- fds[1].fd = ntdll_get_thread_data()->esync_apc_fd; +- fds[1].events = POLLIN; +- pollcount++; +- } +- for (i = 0; i < count; i++) +- { +- struct esync *obj = objs[i]; +- +- fds[0].fd = obj ? obj->fd : -1; +- +- if (obj && obj->type == ESYNC_MUTEX) +- { +- /* It might be ours. */ +- struct mutex *mutex = obj->shm; +- +- if (mutex->tid == GetCurrentThreadId()) +- continue; +- } +- +- ret = do_poll( fds, pollcount, timeout ? &end : NULL ); +- if (ret <= 0) +- goto err; +- else if (alertable && (fds[1].revents & POLLIN)) +- goto userapc; +- +- if (fds[0].revents & (POLLHUP | POLLERR | POLLNVAL)) +- { +- ERR("Polling on fd %d returned %#x.\n", fds[0].fd, fds[0].revents); +- return STATUS_INVALID_HANDLE; +- } +- } +- +- /* If we got here and we haven't timed out, that means all of the +- * handles were signaled. Check to make sure they still are. */ +- for (i = 0; i < count; i++) +- { +- fds[i].fd = objs[i] ? objs[i]->fd : -1; +- fds[i].events = POLLIN; +- } +- /* There's no reason to check for APCs here. */ +- pollcount = i; +- +- /* Poll everything to see if they're still signaled. */ +- ret = poll( fds, pollcount, 0 ); +- if (ret == pollcount) +- { +- BOOL abandoned = FALSE; +- +- /* Quick, grab everything. */ +- for (i = 0; i < count; i++) +- { +- struct esync *obj = objs[i]; +- +- switch (obj->type) +- { +- case ESYNC_MUTEX: +- { +- struct mutex *mutex = obj->shm; +- if (mutex->tid == GetCurrentThreadId()) +- break; +- /* otherwise fall through */ +- } +- case ESYNC_SEMAPHORE: +- case ESYNC_AUTO_EVENT: +- if ((size = read( fds[i].fd, &value, sizeof(value) )) != sizeof(value)) +- { +- /* We were too slow. Put everything back. */ +- value = 1; +- for (j = i - 1; j >= 0; j--) +- { +- struct esync *obj = objs[j]; +- +- if (obj->type == ESYNC_MUTEX) +- { +- struct mutex *mutex = obj->shm; +- +- if (mutex->tid == GetCurrentThreadId()) +- continue; +- } +- if (write( fds[j].fd, &value, sizeof(value) ) == -1) +- { +- ERR("write failed.\n"); +- return errno_to_status( errno ); +- } +- } +- +- goto tryagain; /* break out of two loops and a switch */ +- } +- break; +- default: +- /* If a manual-reset event changed between there and +- * here, it's shouldn't be a problem. 
*/ +- break; +- } +- } +- +- /* If we got here, we successfully waited on every object. */ +- /* Make sure to let ourselves know that we grabbed the mutexes +- * and semaphores. */ +- for (i = 0; i < count; i++) +- abandoned |= update_grabbed_object( objs[i] ); +- +- if (abandoned) +- { +- TRACE("Wait successful, but some object(s) were abandoned.\n"); +- return STATUS_ABANDONED; +- } +- TRACE("Wait successful.\n"); +- return STATUS_SUCCESS; +- } +- +- /* If we got here, ppoll() returned less than all of our objects. +- * So loop back to the beginning and try again. */ +- } /* while(1) */ +- } /* else (wait-all) */ +- +-err: +- /* We should only get here if poll() failed. */ +- +- if (ret == 0) +- { +- TRACE("Wait timed out.\n"); +- return STATUS_TIMEOUT; +- } +- else +- { +- ERR("ppoll failed: %s\n", strerror(errno)); +- return errno_to_status( errno ); +- } +- +-userapc: +- TRACE("Woken up by user APC.\n"); +- +- /* We have to make a server call anyway to get the APC to execute, so just +- * delegate down to server_select(). */ +- ret = server_wait( NULL, 0, SELECT_INTERRUPTIBLE | SELECT_ALERTABLE, &zero ); +- +- /* This can happen if we received a system APC, and the APC fd was woken up +- * before we got SIGUSR1. poll() doesn't return EINTR in that case. The +- * right thing to do seems to be to return STATUS_USER_APC anyway. */ +- if (ret == STATUS_TIMEOUT) ret = STATUS_USER_APC; +- return ret; +-} +- +-/* We need to let the server know when we are doing a message wait, and when we +- * are done with one, so that all of the code surrounding hung queues works. +- * We also need this for WaitForInputIdle(). */ +-static void server_set_msgwait( int in_msgwait ) +-{ +- SERVER_START_REQ( esync_msgwait ) +- { +- req->in_msgwait = in_msgwait; +- wine_server_call( req ); +- } +- SERVER_END_REQ; +-} +- +-/* This is a very thin wrapper around the proper implementation above. The +- * purpose is to make sure the server knows when we are doing a message wait. +- * This is separated into a wrapper function since there are at least a dozen +- * exit paths from esync_wait_objects(). 
*/ +-NTSTATUS esync_wait_objects( DWORD count, const HANDLE *handles, BOOLEAN wait_any, +- BOOLEAN alertable, const LARGE_INTEGER *timeout ) +-{ +- BOOL msgwait = FALSE; +- struct esync *obj; +- NTSTATUS ret; +- +- if (count && !get_object( handles[count - 1], &obj ) && obj->type == ESYNC_QUEUE) +- { +- msgwait = TRUE; +- server_set_msgwait( 1 ); +- } +- +- ret = __esync_wait_objects( count, handles, wait_any, alertable, timeout ); +- +- if (msgwait) +- server_set_msgwait( 0 ); +- +- return ret; +-} +- +-NTSTATUS esync_signal_and_wait( HANDLE signal, HANDLE wait, BOOLEAN alertable, +- const LARGE_INTEGER *timeout ) +-{ +- struct esync *obj; +- NTSTATUS ret; +- +- if ((ret = get_object( signal, &obj ))) return ret; +- +- switch (obj->type) +- { +- case ESYNC_SEMAPHORE: +- ret = esync_release_semaphore( signal, 1, NULL ); +- break; +- case ESYNC_AUTO_EVENT: +- case ESYNC_MANUAL_EVENT: +- ret = esync_set_event( signal ); +- break; +- case ESYNC_MUTEX: +- ret = esync_release_mutex( signal, NULL ); +- break; +- default: +- return STATUS_OBJECT_TYPE_MISMATCH; +- } +- if (ret) return ret; +- +- return esync_wait_objects( 1, &wait, TRUE, alertable, timeout ); +-} +- +-void esync_init(void) +-{ +- struct stat st; +- +- if (!do_esync()) +- { +- /* make sure the server isn't running with WINEESYNC */ +- HANDLE handle; +- NTSTATUS ret; +- +- ret = create_esync( 0, &handle, 0, NULL, 0, 0 ); +- if (ret != STATUS_NOT_IMPLEMENTED) +- { +- ERR("Server is running with WINEESYNC but this process is not, please enable WINEESYNC or restart wineserver.\n"); +- exit(1); +- } +- +- return; +- } +- +- if (stat( config_dir, &st ) == -1) +- ERR("Cannot stat %s\n", config_dir); +- +- if (st.st_ino != (unsigned long)st.st_ino) +- sprintf( shm_name, "/wine-%lx%08lx-esync", (unsigned long)((unsigned long long)st.st_ino >> 32), (unsigned long)st.st_ino ); +- else +- sprintf( shm_name, "/wine-%lx-esync", (unsigned long)st.st_ino ); +- +- if ((shm_fd = shm_open( shm_name, O_RDWR, 0644 )) == -1) +- { +- /* probably the server isn't running with WINEESYNC, tell the user and bail */ +- if (errno == ENOENT) +- ERR("Failed to open esync shared memory file; make sure no stale wineserver instances are running without WINEESYNC.\n"); +- else +- ERR("Failed to initialize shared memory: %s\n", strerror( errno )); +- exit(1); +- } +- +- pagesize = sysconf( _SC_PAGESIZE ); +- +- shm_addrs = calloc( 128, sizeof(shm_addrs[0]) ); +- shm_addrs_size = 128; +-} +diff --git a/dlls/ntdll/unix/esync.h b/dlls/ntdll/unix/esync.h +deleted file mode 100644 +index 59f8809..0000000 +--- a/dlls/ntdll/unix/esync.h ++++ /dev/null +@@ -1,61 +0,0 @@ +-/* +- * eventfd-based synchronization objects +- * +- * Copyright (C) 2018 Zebediah Figura +- * +- * This library is free software; you can redistribute it and/or +- * modify it under the terms of the GNU Lesser General Public +- * License as published by the Free Software Foundation; either +- * version 2.1 of the License, or (at your option) any later version. +- * +- * This library is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * Lesser General Public License for more details. 
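/* A sketch of the shared-memory naming scheme esync_init() above relies on:
 * the POSIX shm object name is derived from the inode of the Wine prefix
 * directory, so every client process and the wineserver open the same
 * mapping. Trimmed to the common case of an inode that fits in unsigned
 * long; the helper name open_prefix_shm is illustrative only: */

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>

static int open_prefix_shm( const char *config_dir )
{
    struct stat st;
    char name[29];

    if (stat( config_dir, &st ) == -1) return -1;
    snprintf( name, sizeof(name), "/wine-%lx-esync", (unsigned long)st.st_ino );
    return shm_open( name, O_RDWR, 0644 ); /* the server must have created it */
}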
+- * +- * You should have received a copy of the GNU Lesser General Public +- * License along with this library; if not, write to the Free Software +- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA +- */ +- +-extern int do_esync(void); +-extern void esync_init(void); +-extern NTSTATUS esync_close( HANDLE handle ); +- +-extern NTSTATUS esync_create_semaphore(HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr, LONG initial, LONG max); +-extern NTSTATUS esync_open_semaphore( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr ); +-extern NTSTATUS esync_query_semaphore( HANDLE handle, void *info, ULONG *ret_len ); +-extern NTSTATUS esync_release_semaphore( HANDLE handle, ULONG count, ULONG *prev ); +- +-extern NTSTATUS esync_create_event( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr, EVENT_TYPE type, BOOLEAN initial ); +-extern NTSTATUS esync_open_event( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr ); +-extern NTSTATUS esync_pulse_event( HANDLE handle ); +-extern NTSTATUS esync_query_event( HANDLE handle, void *info, ULONG *ret_len ); +-extern NTSTATUS esync_reset_event( HANDLE handle ); +-extern NTSTATUS esync_set_event( HANDLE handle ); +- +-extern NTSTATUS esync_create_mutex( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr, BOOLEAN initial ); +-extern NTSTATUS esync_open_mutex( HANDLE *handle, ACCESS_MASK access, +- const OBJECT_ATTRIBUTES *attr ); +-extern NTSTATUS esync_query_mutex( HANDLE handle, void *info, ULONG *ret_len ); +-extern NTSTATUS esync_release_mutex( HANDLE *handle, LONG *prev ); +- +-extern NTSTATUS esync_wait_objects( DWORD count, const HANDLE *handles, BOOLEAN wait_any, +- BOOLEAN alertable, const LARGE_INTEGER *timeout ); +-extern NTSTATUS esync_signal_and_wait( HANDLE signal, HANDLE wait, BOOLEAN alertable, +- const LARGE_INTEGER *timeout ); +- +- +-/* We have to synchronize on the fd cache mutex so that our calls to receive_fd +- * don't race with theirs. It looks weird, I know. +- * +- * If we weren't trying to avoid touching the code I'd rename the mutex to +- * "server_fd_mutex" or something similar. 
*/ +-extern pthread_mutex_t fd_cache_mutex; +- +-extern int receive_fd( obj_handle_t *handle ); +diff --git a/dlls/ntdll/unix/file.c b/dlls/ntdll/unix/file.c +index 2bfb9b7..81564d7 100644 +--- a/dlls/ntdll/unix/file.c ++++ b/dlls/ntdll/unix/file.c +@@ -7507,7 +7507,7 @@ NTSTATUS WINAPI NtLockFile( HANDLE file, HANDLE event, PIO_APC_ROUTINE apc, void + } + if (handle) + { +- NtWaitForSingleObject( handle, FALSE, NULL ); ++ server_wait_for_object( handle, FALSE, NULL ); + NtClose( handle ); + } + else /* Unix lock conflict, sleep a bit and retry */ +diff --git a/dlls/ntdll/unix/loader.c b/dlls/ntdll/unix/loader.c +index 50d9e27..f163492 100644 +--- a/dlls/ntdll/unix/loader.c ++++ b/dlls/ntdll/unix/loader.c +@@ -88,7 +88,6 @@ extern char **environ; + #include "winioctl.h" + #include "winternl.h" + #include "unix_private.h" +-#include "esync.h" + #include "wine/list.h" + #include "ntsyscalls.h" + #include "wine/debug.h" +@@ -1853,7 +1852,6 @@ static void start_main_thread(void) + signal_alloc_thread( teb ); + dbg_init(); + startup_info_size = server_init_process(); +- esync_init(); + virtual_map_user_shared_data(); + init_cpu_info(); + init_files(); +diff --git a/dlls/ntdll/unix/process.c b/dlls/ntdll/unix/process.c +index 3591a3e..b744517 100644 +--- a/dlls/ntdll/unix/process.c ++++ b/dlls/ntdll/unix/process.c +@@ -921,7 +921,7 @@ NTSTATUS WINAPI NtCreateUserProcess( HANDLE *process_handle_ptr, HANDLE *thread_ + + /* wait for the new process info to be ready */ + +- NtWaitForSingleObject( process_info, FALSE, NULL ); ++ server_wait_for_object( process_info, FALSE, NULL ); + SERVER_START_REQ( get_new_process_info ) + { + req->info = wine_server_obj_handle( process_info ); +diff --git a/dlls/ntdll/unix/server.c b/dlls/ntdll/unix/server.c +index 0d4b320..dba7645 100644 +--- a/dlls/ntdll/unix/server.c ++++ b/dlls/ntdll/unix/server.c +@@ -83,7 +83,6 @@ + #include "wine/server.h" + #include "wine/debug.h" + #include "unix_private.h" +-#include "esync.h" + #include "ddk/wdm.h" + + WINE_DEFAULT_DEBUG_CHANNEL(server); +@@ -814,6 +813,21 @@ unsigned int server_wait( const select_op_t *select_op, data_size_t size, UINT f + } + + ++/* helper function to perform a server-side wait on an internal handle without ++ * using the fast synchronization path */ ++unsigned int server_wait_for_object( HANDLE handle, BOOL alertable, const LARGE_INTEGER *timeout ) ++{ ++ select_op_t select_op; ++ UINT flags = SELECT_INTERRUPTIBLE; ++ ++ if (alertable) flags |= SELECT_ALERTABLE; ++ ++ select_op.wait.op = SELECT_WAIT; ++ select_op.wait.handles[0] = wine_server_obj_handle( handle ); ++ return server_wait( &select_op, offsetof( select_op_t, wait.handles[1] ), flags, timeout ); ++} ++ ++ + /*********************************************************************** + * NtContinue (NTDLL.@) + */ +@@ -875,7 +889,7 @@ unsigned int server_queue_process_apc( HANDLE process, const apc_call_t *call, a + } + else + { +- NtWaitForSingleObject( handle, FALSE, NULL ); ++ server_wait_for_object( handle, FALSE, NULL ); + + SERVER_START_REQ( get_apc_result ) + { +@@ -947,7 +961,7 @@ void wine_server_send_fd( int fd ) + * + * Receive a file descriptor passed from the server. 
+ */ +-int receive_fd( obj_handle_t *handle ) ++static int receive_fd( obj_handle_t *handle ) + { + struct iovec vec; + struct msghdr msghdr; +@@ -1826,12 +1840,17 @@ NTSTATUS WINAPI NtDuplicateObject( HANDLE source_process, HANDLE source, HANDLE + return result.dup_handle.status; + } + ++ /* hold fd_cache_mutex to prevent the fd from being added again between the ++ * call to remove_fd_from_cache and close_handle */ + server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); + + /* always remove the cached fd; if the server request fails we'll just + * retrieve it again */ + if (options & DUPLICATE_CLOSE_SOURCE) ++ { + fd = remove_fd_from_cache( source ); ++ close_fast_sync_obj( source ); ++ } + + SERVER_START_REQ( dup_handle ) + { +@@ -1897,14 +1916,15 @@ NTSTATUS WINAPI NtClose( HANDLE handle ) + if (HandleToLong( handle ) >= ~5 && HandleToLong( handle ) <= ~0) + return STATUS_SUCCESS; + ++ /* hold fd_cache_mutex to prevent the fd from being added again between the ++ * call to remove_fd_from_cache and close_handle */ + server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); + + /* always remove the cached fd; if the server request fails we'll just + * retrieve it again */ + fd = remove_fd_from_cache( handle ); + +- if (do_esync()) +- esync_close( handle ); ++ close_fast_sync_obj( handle ); + + SERVER_START_REQ( close_handle ) + { +diff --git a/dlls/ntdll/unix/sync.c b/dlls/ntdll/unix/sync.c +index 1072265..a9a0a2c 100644 +--- a/dlls/ntdll/unix/sync.c ++++ b/dlls/ntdll/unix/sync.c +@@ -30,9 +30,11 @@ + #include + #include + #include ++#include + #include + #include + #include ++#include + #include + #ifdef HAVE_SYS_SYSCALL_H + #include +@@ -45,6 +47,7 @@ + #endif + #include + #include ++#include + #include + #include + #include +@@ -54,6 +57,9 @@ + #ifdef HAVE_KQUEUE + # include + #endif ++#ifdef HAVE_LINUX_NTSYNC_H ++# include ++#endif + + #include "ntstatus.h" + #define WIN32_NO_STATUS +@@ -63,7 +69,6 @@ + #include "wine/server.h" + #include "wine/debug.h" + #include "unix_private.h" +-#include "esync.h" + + WINE_DEFAULT_DEBUG_CHANNEL(sync); + +@@ -72,9 +77,11 @@ HANDLE keyed_event = 0; + static const char *debugstr_timeout( const LARGE_INTEGER *timeout ) + { + if (!timeout) return "(infinite)"; +- return wine_dbgstr_longlong( timeout->QuadPart ); ++ return wine_dbg_sprintf( "%lld.%07ld", (long long)(timeout->QuadPart / TICKSPERSEC), ++ (long)(timeout->QuadPart % TICKSPERSEC) ); + } + ++ + /* return a monotonic time counter, in Win32 ticks */ + static inline ULONGLONG monotonic_counter(void) + { +@@ -239,6 +246,902 @@ static unsigned int validate_open_object_attributes( const OBJECT_ATTRIBUTES *at + } + + ++#ifdef HAVE_LINUX_NTSYNC_H ++ ++static int get_linux_sync_device(void) ++{ ++ static LONG fast_sync_fd = -2; ++ ++ if (fast_sync_fd == -2) ++ { ++ HANDLE device; ++ int fd, needs_close; ++ NTSTATUS ret; ++ ++ SERVER_START_REQ( get_linux_sync_device ) ++ { ++ if (!(ret = wine_server_call( req ))) device = wine_server_ptr_handle( reply->handle ); ++ } ++ SERVER_END_REQ; ++ ++ if (!ret) ++ { ++ if (!server_get_unix_fd( device, 0, &fd, &needs_close, NULL, NULL )) ++ { ++ if (InterlockedCompareExchange( &fast_sync_fd, fd, -2 ) != -2) ++ { ++ /* someone beat us to it */ ++ if (needs_close) close( fd ); ++ NtClose( device ); ++ } ++ /* otherwise don't close the device */ ++ } ++ else ++ { ++ InterlockedCompareExchange( &fast_sync_fd, -1, -2 ); ++ NtClose( device ); ++ } ++ } ++ else ++ { ++ InterlockedCompareExchange( &fast_sync_fd, -1, -2 ); ++ } ++ } ++ return 
fast_sync_fd; ++} ++ ++/* It's possible for synchronization primitives to remain alive even after being ++ * closed, because a thread is still waiting on them. It's rare in practice, and ++ * documented as being undefined behaviour by Microsoft, but it works, and some ++ * applications rely on it. This means we need to refcount handles, and defer ++ * deleting them on the server side until the refcount reaches zero. We do this ++ * by having each client process hold a handle to the fast synchronization ++ * object, as well as a private refcount. When the client refcount reaches zero, ++ * it closes the handle; when all handles are closed, the server deletes the ++ * fast synchronization object. ++ * ++ * We also need this for signal-and-wait. The signal and wait operations aren't ++ * atomic, but we can't perform the signal and then return STATUS_INVALID_HANDLE ++ * for the wait—we need to either do both operations or neither. That means we ++ * need to grab references to both objects, and prevent them from being ++ * destroyed before we're done with them. ++ * ++ * We want lookup of objects from the cache to be very fast; ideally, it should ++ * be lock-free. We achieve this by using atomic modifications to "refcount", ++ * and guaranteeing that all other fields are valid and correct *as long as* ++ * refcount is nonzero, and we store the entire structure in memory which will ++ * never be freed. ++ * ++ * This means that acquiring the object can't use a simple atomic increment; it ++ * has to use a compare-and-swap loop to ensure that it doesn't try to increment ++ * an object with a zero refcount. That's still leagues better than a real lock, ++ * though, and release can be a single atomic decrement. ++ * ++ * It also means that threads modifying the cache need to take a lock, to ++ * prevent other threads from writing to it concurrently. ++ * ++ * It's possible for an object currently in use (by a waiter) to be closed and ++ * the same handle immediately reallocated to a different object. This should be ++ * a very rare situation, and in that case we simply don't cache the handle. 
++ */ ++struct fast_sync_cache_entry ++{ ++ LONG refcount; ++ int fd; ++ enum fast_sync_type type; ++ unsigned int access; ++ BOOL closed; ++ /* handle to the underlying fast sync object, stored as obj_handle_t to save ++ * space */ ++ obj_handle_t handle; ++}; ++ ++ ++static void release_fast_sync_obj( struct fast_sync_cache_entry *cache ) ++{ ++ /* save the handle and fd now; as soon as the refcount hits 0 we cannot ++ * access the cache anymore */ ++ HANDLE handle = wine_server_ptr_handle( cache->handle ); ++ int fd = cache->fd; ++ LONG refcount = InterlockedDecrement( &cache->refcount ); ++ ++ assert( refcount >= 0 ); ++ ++ if (!refcount) ++ { ++ NTSTATUS ret; ++ ++ /* we can't call NtClose here as we may be inside fd_cache_mutex */ ++ SERVER_START_REQ( close_handle ) ++ { ++ req->handle = wine_server_obj_handle( handle ); ++ ret = wine_server_call( req ); ++ } ++ SERVER_END_REQ; ++ ++ assert( !ret ); ++ close( fd ); ++ } ++} ++ ++ ++#define FAST_SYNC_CACHE_BLOCK_SIZE (65536 / sizeof(struct fast_sync_cache_entry)) ++#define FAST_SYNC_CACHE_ENTRIES 128 ++ ++static struct fast_sync_cache_entry *fast_sync_cache[FAST_SYNC_CACHE_ENTRIES]; ++static struct fast_sync_cache_entry fast_sync_cache_initial_block[FAST_SYNC_CACHE_BLOCK_SIZE]; ++ ++static inline unsigned int fast_sync_handle_to_index( HANDLE handle, unsigned int *entry ) ++{ ++ unsigned int idx = (wine_server_obj_handle(handle) >> 2) - 1; ++ *entry = idx / FAST_SYNC_CACHE_BLOCK_SIZE; ++ return idx % FAST_SYNC_CACHE_BLOCK_SIZE; ++} ++ ++ ++static struct fast_sync_cache_entry *cache_fast_sync_obj( HANDLE handle, obj_handle_t fast_sync, int fd, ++ enum fast_sync_type type, unsigned int access ) ++{ ++ unsigned int entry, idx = fast_sync_handle_to_index( handle, &entry ); ++ struct fast_sync_cache_entry *cache; ++ sigset_t sigset; ++ int refcount; ++ ++ if (entry >= FAST_SYNC_CACHE_ENTRIES) ++ { ++ FIXME( "too many allocated handles, not caching %p\n", handle ); ++ return NULL; ++ } ++ ++ if (!fast_sync_cache[entry]) /* do we need to allocate a new block of entries? */ ++ { ++ if (!entry) fast_sync_cache[0] = fast_sync_cache_initial_block; ++ else ++ { ++ static const size_t size = FAST_SYNC_CACHE_BLOCK_SIZE * sizeof(struct fast_sync_cache_entry); ++ void *ptr = anon_mmap_alloc( size, PROT_READ | PROT_WRITE ); ++ if (ptr == MAP_FAILED) return NULL; ++ if (InterlockedCompareExchangePointer( (void **)&fast_sync_cache[entry], ptr, NULL )) ++ munmap( ptr, size ); /* someone beat us to it */ ++ } ++ } ++ ++ cache = &fast_sync_cache[entry][idx]; ++ ++ /* Hold fd_cache_mutex instead of a separate mutex, to prevent the same ++ * race between this function and NtClose. That is, prevent the object from ++ * being cached again between close_fast_sync_obj() and close_handle. */ ++ server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); ++ ++ if (InterlockedCompareExchange( &cache->refcount, 0, 0 )) ++ { ++ /* We lost the race with another thread trying to cache this object, or ++ * the handle is currently being used for another object (i.e. it was ++ * closed and then reused). We have no way of knowing which, and in the ++ * latter case we can't cache this object until the old one is ++ * completely destroyed, so always return failure. 
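++     * Returning NULL here is harmless: get_fast_sync_obj() then serves the
++     * request out of the caller-provided stack entry, so the operation
++     * still succeeds, merely without caching the handle.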
*/ ++ server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); ++ return NULL; ++ } ++ ++ cache->handle = fast_sync; ++ cache->fd = fd; ++ cache->type = type; ++ cache->access = access; ++ cache->closed = FALSE; ++ /* Make sure we set the other members before the refcount; this store needs ++ * release semantics [paired with the load in get_cached_fast_sync_obj()]. ++ * Set the refcount to 2 (one for the handle, one for the caller). */ ++ refcount = InterlockedExchange( &cache->refcount, 2 ); ++ assert( !refcount ); ++ ++ server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); ++ ++ return cache; ++} ++ ++ ++/* returns the previous value */ ++static inline LONG interlocked_inc_if_nonzero( LONG *dest ) ++{ ++ LONG val, tmp; ++ for (val = *dest;; val = tmp) ++ { ++ if (!val || (tmp = InterlockedCompareExchange( dest, val + 1, val )) == val) ++ break; ++ } ++ return val; ++} ++ ++ ++static struct fast_sync_cache_entry *get_cached_fast_sync_obj( HANDLE handle ) ++{ ++ unsigned int entry, idx = fast_sync_handle_to_index( handle, &entry ); ++ struct fast_sync_cache_entry *cache; ++ ++ if (entry >= FAST_SYNC_CACHE_ENTRIES || !fast_sync_cache[entry]) ++ return NULL; ++ ++ cache = &fast_sync_cache[entry][idx]; ++ ++ /* this load needs acquire semantics [paired with the store in ++ * cache_fast_sync_obj()] */ ++ if (!interlocked_inc_if_nonzero( &cache->refcount )) ++ return NULL; ++ ++ if (cache->closed) ++ { ++ /* The object is still being used, but "handle" has been closed. The ++ * handle value might have been reused for another object in the ++ * meantime, in which case we have to report that valid object, so ++ * force the caller to check the server. */ ++ release_fast_sync_obj( cache ); ++ return NULL; ++ } ++ ++ return cache; ++} ++ ++ ++static BOOL fast_sync_types_match( enum fast_sync_type a, enum fast_sync_type b ) ++{ ++ if (a == b) return TRUE; ++ if (a == FAST_SYNC_AUTO_EVENT && b == FAST_SYNC_MANUAL_EVENT) return TRUE; ++ if (b == FAST_SYNC_AUTO_EVENT && a == FAST_SYNC_MANUAL_EVENT) return TRUE; ++ return FALSE; ++} ++ ++ ++/* returns a pointer to a cache entry; if the object could not be cached, ++ * returns "stack_cache" instead, which should be allocated on stack */ ++static NTSTATUS get_fast_sync_obj( HANDLE handle, enum fast_sync_type desired_type, ACCESS_MASK desired_access, ++ struct fast_sync_cache_entry *stack_cache, ++ struct fast_sync_cache_entry **ret_cache ) ++{ ++ struct fast_sync_cache_entry *cache; ++ obj_handle_t fast_sync_handle; ++ enum fast_sync_type type; ++ unsigned int access; ++ int fd, needs_close; ++ NTSTATUS ret; ++ ++ /* try to find it in the cache already */ ++ if ((cache = get_cached_fast_sync_obj( handle ))) ++ { ++ *ret_cache = cache; ++ return STATUS_SUCCESS; ++ } ++ ++ /* try to retrieve it from the server */ ++ SERVER_START_REQ( get_linux_sync_obj ) ++ { ++ req->handle = wine_server_obj_handle( handle ); ++ if (!(ret = wine_server_call( req ))) ++ { ++ fast_sync_handle = reply->handle; ++ access = reply->access; ++ type = reply->type; ++ } ++ } ++ SERVER_END_REQ; ++ ++ if (ret) return ret; ++ ++ if ((ret = server_get_unix_fd( wine_server_ptr_handle( fast_sync_handle ), ++ 0, &fd, &needs_close, NULL, NULL ))) ++ return ret; ++ ++ cache = cache_fast_sync_obj( handle, fast_sync_handle, fd, type, access ); ++ if (!cache) ++ { ++ cache = stack_cache; ++ cache->handle = fast_sync_handle; ++ cache->fd = fd; ++ cache->type = type; ++ cache->access = access; ++ cache->closed = FALSE; ++ cache->refcount = 1; ++ } ++ ++ *ret_cache = cache; ++ ++ 
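++    /* finally validate the type and access; on failure the reference
++     * acquired above is dropped again before returning */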
if (desired_type && !fast_sync_types_match( cache->type, desired_type )) ++ { ++ release_fast_sync_obj( cache ); ++ return STATUS_OBJECT_TYPE_MISMATCH; ++ } ++ ++ if ((cache->access & desired_access) != desired_access) ++ { ++ release_fast_sync_obj( cache ); ++ return STATUS_ACCESS_DENIED; ++ } ++ ++ return STATUS_SUCCESS; ++} ++ ++ ++/* caller must hold fd_cache_mutex */ ++void close_fast_sync_obj( HANDLE handle ) ++{ ++ struct fast_sync_cache_entry *cache = get_cached_fast_sync_obj( handle ); ++ ++ if (cache) ++ { ++ cache->closed = TRUE; ++ /* once for the reference we just grabbed, and once for the handle */ ++ release_fast_sync_obj( cache ); ++ release_fast_sync_obj( cache ); ++ } ++} ++ ++ ++static NTSTATUS linux_release_semaphore_obj( int obj, ULONG count, ULONG *prev_count ) ++{ ++ NTSTATUS ret; ++ ++ ret = ioctl( obj, NTSYNC_IOC_SEM_POST, &count ); ++ if (ret < 0) ++ { ++ if (errno == EOVERFLOW) ++ return STATUS_SEMAPHORE_LIMIT_EXCEEDED; ++ else ++ return errno_to_status( errno ); ++ } ++ if (prev_count) *prev_count = count; ++ return STATUS_SUCCESS; ++} ++ ++ ++static NTSTATUS fast_release_semaphore( HANDLE handle, ULONG count, ULONG *prev_count ) ++{ ++ struct fast_sync_cache_entry stack_cache, *cache; ++ NTSTATUS ret; ++ ++ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_SEMAPHORE, ++ SEMAPHORE_MODIFY_STATE, &stack_cache, &cache ))) ++ return ret; ++ ++ ret = linux_release_semaphore_obj( cache->fd, count, prev_count ); ++ ++ release_fast_sync_obj( cache ); ++ return ret; ++} ++ ++ ++static NTSTATUS linux_query_semaphore_obj( int obj, SEMAPHORE_BASIC_INFORMATION *info ) ++{ ++ struct ntsync_sem_args args = {0}; ++ NTSTATUS ret; ++ ++ ret = ioctl( obj, NTSYNC_IOC_SEM_READ, &args ); ++ if (ret < 0) ++ return errno_to_status( errno ); ++ info->CurrentCount = args.count; ++ info->MaximumCount = args.max; ++ return STATUS_SUCCESS; ++} ++ ++ ++static NTSTATUS fast_query_semaphore( HANDLE handle, SEMAPHORE_BASIC_INFORMATION *info ) ++{ ++ struct fast_sync_cache_entry stack_cache, *cache; ++ NTSTATUS ret; ++ ++ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_SEMAPHORE, ++ SEMAPHORE_QUERY_STATE, &stack_cache, &cache ))) ++ return ret; ++ ++ ret = linux_query_semaphore_obj( cache->fd, info ); ++ ++ release_fast_sync_obj( cache ); ++ return ret; ++} ++ ++ ++static NTSTATUS linux_set_event_obj( int obj, LONG *prev_state ) ++{ ++ NTSTATUS ret; ++ __u32 prev; ++ ++ ret = ioctl( obj, NTSYNC_IOC_EVENT_SET, &prev ); ++ if (ret < 0) ++ return errno_to_status( errno ); ++ if (prev_state) *prev_state = prev; ++ return STATUS_SUCCESS; ++} ++ ++ ++static NTSTATUS fast_set_event( HANDLE handle, LONG *prev_state ) ++{ ++ struct fast_sync_cache_entry stack_cache, *cache; ++ NTSTATUS ret; ++ ++ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_AUTO_EVENT, ++ EVENT_MODIFY_STATE, &stack_cache, &cache ))) ++ return ret; ++ ++ ret = linux_set_event_obj( cache->fd, prev_state ); ++ ++ release_fast_sync_obj( cache ); ++ return ret; ++} ++ ++ ++static NTSTATUS linux_reset_event_obj( int obj, LONG *prev_state ) ++{ ++ NTSTATUS ret; ++ __u32 prev; ++ ++ ret = ioctl( obj, NTSYNC_IOC_EVENT_RESET, &prev ); ++ if (ret < 0) ++ return errno_to_status( errno ); ++ if (prev_state) *prev_state = prev; ++ return STATUS_SUCCESS; ++} ++ ++ ++static NTSTATUS fast_reset_event( HANDLE handle, LONG *prev_state ) ++{ ++ struct fast_sync_cache_entry stack_cache, *cache; ++ NTSTATUS ret; ++ ++ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_AUTO_EVENT, ++ EVENT_MODIFY_STATE, &stack_cache, &cache ))) ++ return ret; ++ ++ ret = 
linux_reset_event_obj( cache->fd, prev_state ); ++ ++ release_fast_sync_obj( cache ); ++ return ret; ++} ++ ++ ++static NTSTATUS linux_pulse_event_obj( int obj, LONG *prev_state ) ++{ ++ NTSTATUS ret; ++ __u32 prev; ++ ++ ret = ioctl( obj, NTSYNC_IOC_EVENT_PULSE, &prev ); ++ if (ret < 0) ++ return errno_to_status( errno ); ++ if (prev_state) *prev_state = prev; ++ return STATUS_SUCCESS; ++} ++ ++ ++static NTSTATUS fast_pulse_event( HANDLE handle, LONG *prev_state ) ++{ ++ struct fast_sync_cache_entry stack_cache, *cache; ++ NTSTATUS ret; ++ ++ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_AUTO_EVENT, ++ EVENT_MODIFY_STATE, &stack_cache, &cache ))) ++ return ret; ++ ++ ret = linux_pulse_event_obj( cache->fd, prev_state ); ++ ++ release_fast_sync_obj( cache ); ++ return ret; ++} ++ ++ ++static NTSTATUS linux_query_event_obj( int obj, enum fast_sync_type type, EVENT_BASIC_INFORMATION *info ) ++{ ++ struct ntsync_event_args args = {0}; ++ NTSTATUS ret; ++ ++ ret = ioctl( obj, NTSYNC_IOC_EVENT_READ, &args ); ++ if (ret < 0) ++ return errno_to_status( errno ); ++ info->EventType = (type == FAST_SYNC_AUTO_EVENT) ? SynchronizationEvent : NotificationEvent; ++ info->EventState = args.signaled; ++ return STATUS_SUCCESS; ++} ++ ++ ++static NTSTATUS fast_query_event( HANDLE handle, EVENT_BASIC_INFORMATION *info ) ++{ ++ struct fast_sync_cache_entry stack_cache, *cache; ++ NTSTATUS ret; ++ ++ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_AUTO_EVENT, ++ EVENT_QUERY_STATE, &stack_cache, &cache ))) ++ return ret; ++ ++ ret = linux_query_event_obj( cache->fd, cache->type, info ); ++ ++ release_fast_sync_obj( cache ); ++ return ret; ++} ++ ++ ++static NTSTATUS linux_release_mutex_obj( int obj, LONG *prev_count ) ++{ ++ struct ntsync_mutex_args args = {0}; ++ NTSTATUS ret; ++ ++ args.owner = GetCurrentThreadId(); ++ ret = ioctl( obj, NTSYNC_IOC_MUTEX_UNLOCK, &args ); ++ ++ if (ret < 0) ++ { ++ if (errno == EOVERFLOW) ++ return STATUS_MUTANT_LIMIT_EXCEEDED; ++ else if (errno == EPERM) ++ return STATUS_MUTANT_NOT_OWNED; ++ else ++ return errno_to_status( errno ); ++ } ++ if (prev_count) *prev_count = 1 - args.count; ++ return STATUS_SUCCESS; ++} ++ ++ ++static NTSTATUS fast_release_mutex( HANDLE handle, LONG *prev_count ) ++{ ++ struct fast_sync_cache_entry stack_cache, *cache; ++ NTSTATUS ret; ++ ++ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_MUTEX, 0, &stack_cache, &cache ))) ++ return ret; ++ ++ ret = linux_release_mutex_obj( cache->fd, prev_count ); ++ ++ release_fast_sync_obj( cache ); ++ return ret; ++} ++ ++ ++static NTSTATUS linux_query_mutex_obj( int obj, MUTANT_BASIC_INFORMATION *info ) ++{ ++ struct ntsync_mutex_args args = {0}; ++ NTSTATUS ret; ++ ++ ret = ioctl( obj, NTSYNC_IOC_MUTEX_READ, &args ); ++ ++ if (ret < 0) ++ { ++ if (errno == EOWNERDEAD) ++ { ++ info->AbandonedState = TRUE; ++ info->OwnedByCaller = FALSE; ++ info->CurrentCount = 1; ++ return STATUS_SUCCESS; ++ } ++ else ++ return errno_to_status( errno ); ++ } ++ info->AbandonedState = FALSE; ++ info->OwnedByCaller = (args.owner == GetCurrentThreadId()); ++ info->CurrentCount = 1 - args.count; ++ return STATUS_SUCCESS; ++} ++ ++ ++static NTSTATUS fast_query_mutex( HANDLE handle, MUTANT_BASIC_INFORMATION *info ) ++{ ++ struct fast_sync_cache_entry stack_cache, *cache; ++ NTSTATUS ret; ++ ++ if ((ret = get_fast_sync_obj( handle, FAST_SYNC_MUTEX, MUTANT_QUERY_STATE, ++ &stack_cache, &cache ))) ++ return ret; ++ ++ ret = linux_query_mutex_obj( cache->fd, info ); ++ ++ release_fast_sync_obj( cache ); ++ return ret; ++} ++ ++static 
void select_queue( HANDLE queue ) ++{ ++ SERVER_START_REQ( fast_select_queue ) ++ { ++ req->handle = wine_server_obj_handle( queue ); ++ wine_server_call( req ); ++ } ++ SERVER_END_REQ; ++} ++ ++static void unselect_queue( HANDLE queue, BOOL signaled ) ++{ ++ SERVER_START_REQ( fast_unselect_queue ) ++ { ++ req->handle = wine_server_obj_handle( queue ); ++ req->signaled = signaled; ++ wine_server_call( req ); ++ } ++ SERVER_END_REQ; ++} ++ ++static int get_fast_alert_obj(void) ++{ ++ struct ntdll_thread_data *data = ntdll_get_thread_data(); ++ struct fast_sync_cache_entry stack_cache, *cache; ++ HANDLE alert_handle; ++ unsigned int ret; ++ ++ if (!data->fast_alert_obj) ++ { ++ SERVER_START_REQ( get_fast_alert_event ) ++ { ++ if ((ret = wine_server_call( req ))) ++ ERR( "failed to get fast alert event, status %#x\n", ret ); ++ alert_handle = wine_server_ptr_handle( reply->handle ); ++ } ++ SERVER_END_REQ; ++ ++ if ((ret = get_fast_sync_obj( alert_handle, 0, SYNCHRONIZE, &stack_cache, &cache ))) ++ ERR( "failed to get fast alert obj, status %#x\n", ret ); ++ data->fast_alert_obj = cache->fd; ++ /* Set the fd to -1 so release_fast_sync_obj() won't close it. ++ * Manhandling the cache entry here is fine since we're the only thread ++ * that can access our own alert event. */ ++ cache->fd = -1; ++ release_fast_sync_obj( cache ); ++ NtClose( alert_handle ); ++ } ++ ++ return data->fast_alert_obj; ++} ++ ++static NTSTATUS linux_wait_objs( int device, const DWORD count, const int *objs, ++ BOOLEAN wait_any, BOOLEAN alertable, const LARGE_INTEGER *timeout ) ++{ ++ struct ntsync_wait_args args = {0}; ++ unsigned long request; ++ struct timespec now; ++ int ret; ++ ++ if (!timeout || timeout->QuadPart == TIMEOUT_INFINITE) ++ { ++ args.timeout = ~(__u64)0; ++ } ++ else if (timeout->QuadPart <= 0) ++ { ++ clock_gettime( CLOCK_MONOTONIC, &now ); ++ args.timeout = (now.tv_sec * NSECPERSEC) + now.tv_nsec + (-timeout->QuadPart * 100); ++ } ++ else ++ { ++ args.timeout = (timeout->QuadPart * 100) - (SECS_1601_TO_1970 * NSECPERSEC); ++ args.flags |= NTSYNC_WAIT_REALTIME; ++ } ++ ++ args.objs = (uintptr_t)objs; ++ args.count = count; ++ args.owner = GetCurrentThreadId(); ++ args.index = ~0u; ++ ++ if (alertable) ++ args.alert = get_fast_alert_obj(); ++ ++ if (wait_any || count == 1) ++ request = NTSYNC_IOC_WAIT_ANY; ++ else ++ request = NTSYNC_IOC_WAIT_ALL; ++ ++ do ++ { ++ ret = ioctl( device, request, &args ); ++ } while (ret < 0 && errno == EINTR); ++ ++ if (!ret) ++ { ++ if (args.index == count) ++ { ++ static const LARGE_INTEGER timeout; ++ ++ ret = server_wait( NULL, 0, SELECT_INTERRUPTIBLE | SELECT_ALERTABLE, &timeout ); ++ assert( ret == STATUS_USER_APC ); ++ return ret; ++ } ++ ++ return wait_any ? args.index : 0; ++ } ++ else if (errno == EOWNERDEAD) ++ return STATUS_ABANDONED + (wait_any ? 
args.index : 0); ++ else if (errno == ETIMEDOUT) ++ return STATUS_TIMEOUT; ++ else ++ return errno_to_status( errno ); ++} ++ ++static NTSTATUS fast_wait( DWORD count, const HANDLE *handles, BOOLEAN wait_any, ++ BOOLEAN alertable, const LARGE_INTEGER *timeout ) ++{ ++ struct fast_sync_cache_entry stack_cache[64], *cache[64]; ++ int device, objs[64]; ++ HANDLE queue = NULL; ++ NTSTATUS ret; ++ DWORD i, j; ++ ++ if ((device = get_linux_sync_device()) < 0) ++ return STATUS_NOT_IMPLEMENTED; ++ ++ for (i = 0; i < count; ++i) ++ { ++ if ((ret = get_fast_sync_obj( handles[i], 0, SYNCHRONIZE, &stack_cache[i], &cache[i] ))) ++ { ++ for (j = 0; j < i; ++j) ++ release_fast_sync_obj( cache[j] ); ++ return ret; ++ } ++ if (cache[i]->type == FAST_SYNC_QUEUE) ++ queue = handles[i]; ++ ++ objs[i] = cache[i]->fd; ++ } ++ ++ /* It's common to wait on the message queue alone. Some applications wait ++ * on it in fast paths, with a zero timeout. Since we take two server calls ++ * instead of one when going through fast_wait_objs(), and since we only ++ * need to go through that path if we're waiting on other objects, just ++ * delegate to the server if we're only waiting on the message queue. */ ++ if (count == 1 && queue) ++ { ++ release_fast_sync_obj( cache[0] ); ++ return server_wait_for_object( handles[0], alertable, timeout ); ++ } ++ ++ if (queue) select_queue( queue ); ++ ++ ret = linux_wait_objs( device, count, objs, wait_any, alertable, timeout ); ++ ++ if (queue) unselect_queue( queue, handles[ret] == queue ); ++ ++ for (i = 0; i < count; ++i) ++ release_fast_sync_obj( cache[i] ); ++ ++ return ret; ++} ++ ++static NTSTATUS fast_signal_and_wait( HANDLE signal, HANDLE wait, ++ BOOLEAN alertable, const LARGE_INTEGER *timeout ) ++{ ++ struct fast_sync_cache_entry signal_stack_cache, *signal_cache; ++ struct fast_sync_cache_entry wait_stack_cache, *wait_cache; ++ HANDLE queue = NULL; ++ NTSTATUS ret; ++ int device; ++ ++ if ((device = get_linux_sync_device()) < 0) ++ return STATUS_NOT_IMPLEMENTED; ++ ++ if ((ret = get_fast_sync_obj( signal, 0, 0, &signal_stack_cache, &signal_cache ))) ++ return ret; ++ ++ switch (signal_cache->type) ++ { ++ case FAST_SYNC_SEMAPHORE: ++ if (!(signal_cache->access & SEMAPHORE_MODIFY_STATE)) ++ { ++ release_fast_sync_obj( signal_cache ); ++ return STATUS_ACCESS_DENIED; ++ } ++ break; ++ ++ case FAST_SYNC_AUTO_EVENT: ++ case FAST_SYNC_MANUAL_EVENT: ++ if (!(signal_cache->access & EVENT_MODIFY_STATE)) ++ { ++ release_fast_sync_obj( signal_cache ); ++ return STATUS_ACCESS_DENIED; ++ } ++ break; ++ ++ case FAST_SYNC_MUTEX: ++ break; ++ ++ default: ++ /* can't be signaled */ ++ release_fast_sync_obj( signal_cache ); ++ return STATUS_OBJECT_TYPE_MISMATCH; ++ } ++ ++ if ((ret = get_fast_sync_obj( wait, 0, SYNCHRONIZE, &wait_stack_cache, &wait_cache ))) ++ { ++ release_fast_sync_obj( signal_cache ); ++ return ret; ++ } ++ ++ if (wait_cache->type == FAST_SYNC_QUEUE) ++ queue = wait; ++ ++ switch (signal_cache->type) ++ { ++ case FAST_SYNC_SEMAPHORE: ++ ret = linux_release_semaphore_obj( signal_cache->fd, 1, NULL ); ++ break; ++ ++ case FAST_SYNC_AUTO_EVENT: ++ case FAST_SYNC_MANUAL_EVENT: ++ ret = linux_set_event_obj( signal_cache->fd, NULL ); ++ break; ++ ++ case FAST_SYNC_MUTEX: ++ ret = linux_release_mutex_obj( signal_cache->fd, NULL ); ++ break; ++ ++ default: ++ assert( 0 ); ++ break; ++ } ++ ++ if (!ret) ++ { ++ if (queue) select_queue( queue ); ++ ret = linux_wait_objs( device, 1, &wait_cache->fd, TRUE, alertable, timeout ); ++ if (queue) unselect_queue( queue, !ret ); ++ } 
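++    /* drop the references taken by get_fast_sync_obj() on both objects */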
++ ++ release_fast_sync_obj( signal_cache ); ++ release_fast_sync_obj( wait_cache ); ++ return ret; ++} ++ ++#else ++ ++void close_fast_sync_obj( HANDLE handle ) ++{ ++} ++ ++static NTSTATUS fast_release_semaphore( HANDLE handle, ULONG count, ULONG *prev_count ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++static NTSTATUS fast_query_semaphore( HANDLE handle, SEMAPHORE_BASIC_INFORMATION *info ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++static NTSTATUS fast_set_event( HANDLE handle, LONG *prev_state ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++static NTSTATUS fast_reset_event( HANDLE handle, LONG *prev_state ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++static NTSTATUS fast_pulse_event( HANDLE handle, LONG *prev_state ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++static NTSTATUS fast_query_event( HANDLE handle, EVENT_BASIC_INFORMATION *info ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++static NTSTATUS fast_release_mutex( HANDLE handle, LONG *prev_count ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++static NTSTATUS fast_query_mutex( HANDLE handle, MUTANT_BASIC_INFORMATION *info ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++static NTSTATUS fast_wait( DWORD count, const HANDLE *handles, BOOLEAN wait_any, ++ BOOLEAN alertable, const LARGE_INTEGER *timeout ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++static NTSTATUS fast_signal_and_wait( HANDLE signal, HANDLE wait, ++ BOOLEAN alertable, const LARGE_INTEGER *timeout ) ++{ ++ return STATUS_NOT_IMPLEMENTED; ++} ++ ++#endif ++ ++ + /****************************************************************************** + * NtCreateSemaphore (NTDLL.@) + */ +@@ -249,13 +1152,13 @@ NTSTATUS WINAPI NtCreateSemaphore( HANDLE *handle, ACCESS_MASK access, const OBJ + data_size_t len; + struct object_attributes *objattr; + ++ TRACE( "access %#x, name %s, initial %d, max %d\n", (int)access, ++ attr ? debugstr_us(attr->ObjectName) : "(null)", (int)initial, (int)max ); ++ + *handle = 0; + if (max <= 0 || initial < 0 || initial > max) return STATUS_INVALID_PARAMETER; + if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; + +- if (do_esync()) +- return esync_create_semaphore( handle, access, attr, initial, max ); +- + SERVER_START_REQ( create_semaphore ) + { + req->access = access; +@@ -279,11 +1182,9 @@ NTSTATUS WINAPI NtOpenSemaphore( HANDLE *handle, ACCESS_MASK access, const OBJEC + { + unsigned int ret; + +- *handle = 0; +- +- if (do_esync()) +- return esync_open_semaphore( handle, access, attr ); ++ TRACE( "access %#x, name %s\n", (int)access, attr ? 
debugstr_us(attr->ObjectName) : "(null)" ); + ++ *handle = 0; + if ((ret = validate_open_object_attributes( attr ))) return ret; + + SERVER_START_REQ( open_semaphore ) +@@ -320,8 +1221,11 @@ NTSTATUS WINAPI NtQuerySemaphore( HANDLE handle, SEMAPHORE_INFORMATION_CLASS cla + + if (len != sizeof(SEMAPHORE_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH; + +- if (do_esync()) +- return esync_query_semaphore( handle, info, ret_len ); ++ if ((ret = fast_query_semaphore( handle, out )) != STATUS_NOT_IMPLEMENTED) ++ { ++ if (!ret && ret_len) *ret_len = sizeof(SEMAPHORE_BASIC_INFORMATION); ++ return ret; ++ } + + SERVER_START_REQ( query_semaphore ) + { +@@ -345,8 +1249,10 @@ NTSTATUS WINAPI NtReleaseSemaphore( HANDLE handle, ULONG count, ULONG *previous + { + unsigned int ret; + +- if (do_esync()) +- return esync_release_semaphore( handle, count, previous ); ++ TRACE( "handle %p, count %u, prev_count %p\n", handle, (int)count, previous ); ++ ++ if ((ret = fast_release_semaphore( handle, count, previous )) != STATUS_NOT_IMPLEMENTED) ++ return ret; + + SERVER_START_REQ( release_semaphore ) + { +@@ -372,12 +1278,11 @@ NTSTATUS WINAPI NtCreateEvent( HANDLE *handle, ACCESS_MASK access, const OBJECT_ + data_size_t len; + struct object_attributes *objattr; + ++ TRACE( "access %#x, name %s, type %u, state %u\n", (int)access, ++ attr ? debugstr_us(attr->ObjectName) : "(null)", type, state ); ++ + *handle = 0; + if (type != NotificationEvent && type != SynchronizationEvent) return STATUS_INVALID_PARAMETER; +- +- if (do_esync()) +- return esync_create_event( handle, access, attr, type, state ); +- + if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; + + SERVER_START_REQ( create_event ) +@@ -403,12 +1308,11 @@ NTSTATUS WINAPI NtOpenEvent( HANDLE *handle, ACCESS_MASK access, const OBJECT_AT + { + unsigned int ret; + ++ TRACE( "access %#x, name %s\n", (int)access, attr ? debugstr_us(attr->ObjectName) : "(null)" ); ++ + *handle = 0; + if ((ret = validate_open_object_attributes( attr ))) return ret; + +- if (do_esync()) +- return esync_open_event( handle, access, attr ); +- + SERVER_START_REQ( open_event ) + { + req->access = access; +@@ -429,11 +1333,12 @@ NTSTATUS WINAPI NtOpenEvent( HANDLE *handle, ACCESS_MASK access, const OBJECT_AT + */ + NTSTATUS WINAPI NtSetEvent( HANDLE handle, LONG *prev_state ) + { +- /* This comment is a dummy to make sure this patch applies in the right place. */ + unsigned int ret; + +- if (do_esync()) +- return esync_set_event( handle ); ++ TRACE( "handle %p, prev_state %p\n", handle, prev_state ); ++ ++ if ((ret = fast_set_event( handle, prev_state )) != STATUS_NOT_IMPLEMENTED) ++ return ret; + + SERVER_START_REQ( event_op ) + { +@@ -452,12 +1357,12 @@ NTSTATUS WINAPI NtSetEvent( HANDLE handle, LONG *prev_state ) + */ + NTSTATUS WINAPI NtResetEvent( HANDLE handle, LONG *prev_state ) + { +- /* This comment is a dummy to make sure this patch applies in the right place. 
*/ + unsigned int ret; + +- if (do_esync()) +- return esync_reset_event( handle ); ++ TRACE( "handle %p, prev_state %p\n", handle, prev_state ); + ++ if ((ret = fast_reset_event( handle, prev_state )) != STATUS_NOT_IMPLEMENTED) ++ return ret; + + SERVER_START_REQ( event_op ) + { +@@ -488,8 +1393,10 @@ NTSTATUS WINAPI NtPulseEvent( HANDLE handle, LONG *prev_state ) + { + unsigned int ret; + +- if (do_esync()) +- return esync_pulse_event( handle ); ++ TRACE( "handle %p, prev_state %p\n", handle, prev_state ); ++ ++ if ((ret = fast_pulse_event( handle, prev_state )) != STATUS_NOT_IMPLEMENTED) ++ return ret; + + SERVER_START_REQ( event_op ) + { +@@ -522,8 +1429,11 @@ NTSTATUS WINAPI NtQueryEvent( HANDLE handle, EVENT_INFORMATION_CLASS class, + + if (len != sizeof(EVENT_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH; + +- if (do_esync()) +- return esync_query_event( handle, info, ret_len ); ++ if ((ret = fast_query_event( handle, out )) != STATUS_NOT_IMPLEMENTED) ++ { ++ if (!ret && ret_len) *ret_len = sizeof(EVENT_BASIC_INFORMATION); ++ return ret; ++ } + + SERVER_START_REQ( query_event ) + { +@@ -550,11 +1460,10 @@ NTSTATUS WINAPI NtCreateMutant( HANDLE *handle, ACCESS_MASK access, const OBJECT + data_size_t len; + struct object_attributes *objattr; + +- *handle = 0; +- +- if (do_esync()) +- return esync_create_mutex( handle, access, attr, owned ); ++ TRACE( "access %#x, name %s, owned %u\n", (int)access, ++ attr ? debugstr_us(attr->ObjectName) : "(null)", owned ); + ++ *handle = 0; + if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; + + SERVER_START_REQ( create_mutex ) +@@ -579,12 +1488,11 @@ NTSTATUS WINAPI NtOpenMutant( HANDLE *handle, ACCESS_MASK access, const OBJECT_A + { + unsigned int ret; + ++ TRACE( "access %#x, name %s\n", (int)access, attr ? debugstr_us(attr->ObjectName) : "(null)" ); ++ + *handle = 0; + if ((ret = validate_open_object_attributes( attr ))) return ret; + +- if (do_esync()) +- return esync_open_mutex( handle, access, attr ); +- + SERVER_START_REQ( open_mutex ) + { + req->access = access; +@@ -607,8 +1515,10 @@ NTSTATUS WINAPI NtReleaseMutant( HANDLE handle, LONG *prev_count ) + { + unsigned int ret; + +- if (do_esync()) +- return esync_release_mutex( handle, prev_count ); ++ TRACE( "handle %p, prev_count %p\n", handle, prev_count ); ++ ++ if ((ret = fast_release_mutex( handle, prev_count )) != STATUS_NOT_IMPLEMENTED) ++ return ret; + + SERVER_START_REQ( release_mutex ) + { +@@ -640,8 +1550,11 @@ NTSTATUS WINAPI NtQueryMutant( HANDLE handle, MUTANT_INFORMATION_CLASS class, + + if (len != sizeof(MUTANT_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH; + +- if (do_esync()) +- return esync_query_mutex( handle, info, ret_len ); ++ if ((ret = fast_query_mutex( handle, out )) != STATUS_NOT_IMPLEMENTED) ++ { ++ if (!ret && ret_len) *ret_len = sizeof(MUTANT_BASIC_INFORMATION); ++ return ret; ++ } + + SERVER_START_REQ( query_mutex ) + { +@@ -1350,6 +2263,9 @@ NTSTATUS WINAPI NtCreateTimer( HANDLE *handle, ACCESS_MASK access, const OBJECT_ + data_size_t len; + struct object_attributes *objattr; + ++ TRACE( "access %#x, name %s, type %u\n", (int)access, ++ attr ? 
debugstr_us(attr->ObjectName) : "(null)", type ); ++ + *handle = 0; + if (type != NotificationTimer && type != SynchronizationTimer) return STATUS_INVALID_PARAMETER; + if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; +@@ -1377,6 +2293,8 @@ NTSTATUS WINAPI NtOpenTimer( HANDLE *handle, ACCESS_MASK access, const OBJECT_AT + { + unsigned int ret; + ++ TRACE( "access %#x, name %s\n", (int)access, attr ? debugstr_us(attr->ObjectName) : "(null)" ); ++ + *handle = 0; + if ((ret = validate_open_object_attributes( attr ))) return ret; + +@@ -1430,6 +2348,8 @@ NTSTATUS WINAPI NtCancelTimer( HANDLE handle, BOOLEAN *state ) + { + unsigned int ret; + ++ TRACE( "handle %p, state %p\n", handle, state ); ++ + SERVER_START_REQ( cancel_timer ) + { + req->handle = wine_server_obj_handle( handle ); +@@ -1498,20 +2418,29 @@ NTSTATUS WINAPI NtWaitForMultipleObjects( DWORD count, const HANDLE *handles, BO + { + select_op_t select_op; + UINT i, flags = SELECT_INTERRUPTIBLE; ++ unsigned int ret; + + if (!count || count > MAXIMUM_WAIT_OBJECTS) return STATUS_INVALID_PARAMETER_1; + +- if (do_esync()) ++ if (TRACE_ON(sync)) + { +- NTSTATUS ret = esync_wait_objects( count, handles, wait_any, alertable, timeout ); +- if (ret != STATUS_NOT_IMPLEMENTED) +- return ret; ++ TRACE( "wait_any %u, alertable %u, handles {%p", wait_any, alertable, handles[0] ); ++ for (i = 1; i < count; i++) TRACE( ", %p", handles[i] ); ++ TRACE( "}, timeout %s\n", debugstr_timeout(timeout) ); ++ } ++ ++ if ((ret = fast_wait( count, handles, wait_any, alertable, timeout )) != STATUS_NOT_IMPLEMENTED) ++ { ++ TRACE( "-> %#x\n", ret ); ++ return ret; + } + + if (alertable) flags |= SELECT_ALERTABLE; + select_op.wait.op = wait_any ? SELECT_WAIT : SELECT_WAIT_ALL; + for (i = 0; i < count; i++) select_op.wait.handles[i] = wine_server_obj_handle( handles[i] ); +- return server_wait( &select_op, offsetof( select_op_t, wait.handles[count] ), flags, timeout ); ++ ret = server_wait( &select_op, offsetof( select_op_t, wait.handles[count] ), flags, timeout ); ++ TRACE( "-> %#x\n", ret ); ++ return ret; + } + + +@@ -1532,12 +2461,15 @@ NTSTATUS WINAPI NtSignalAndWaitForSingleObject( HANDLE signal, HANDLE wait, + { + select_op_t select_op; + UINT flags = SELECT_INTERRUPTIBLE; ++ NTSTATUS ret; + +- if (do_esync()) +- return esync_signal_and_wait( signal, wait, alertable, timeout ); ++ TRACE( "signal %p, wait %p, alertable %u, timeout %s\n", signal, wait, alertable, debugstr_timeout(timeout) ); + + if (!signal) return STATUS_INVALID_HANDLE; + ++ if ((ret = fast_signal_and_wait( signal, wait, alertable, timeout )) != STATUS_NOT_IMPLEMENTED) ++ return ret; ++ + if (alertable) flags |= SELECT_ALERTABLE; + select_op.signal_and_wait.op = SELECT_SIGNAL_AND_WAIT; + select_op.signal_and_wait.wait = wine_server_obj_handle( wait ); +@@ -1760,6 +2692,9 @@ NTSTATUS WINAPI NtCreateKeyedEvent( HANDLE *handle, ACCESS_MASK access, + data_size_t len; + struct object_attributes *objattr; + ++ TRACE( "access %#x, name %s, flags %#x\n", (int)access, ++ attr ? debugstr_us(attr->ObjectName) : "(null)", (int)flags ); ++ + *handle = 0; + if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; + +@@ -1784,6 +2719,8 @@ NTSTATUS WINAPI NtOpenKeyedEvent( HANDLE *handle, ACCESS_MASK access, const OBJE + { + unsigned int ret; + ++ TRACE( "access %#x, name %s\n", (int)access, attr ? 
debugstr_us(attr->ObjectName) : "(null)" ); ++ + *handle = 0; + if ((ret = validate_open_object_attributes( attr ))) return ret; + +@@ -1810,6 +2747,8 @@ NTSTATUS WINAPI NtWaitForKeyedEvent( HANDLE handle, const void *key, + select_op_t select_op; + UINT flags = SELECT_INTERRUPTIBLE; + ++ TRACE( "handle %p, key %p, alertable %u, timeout %s\n", handle, key, alertable, debugstr_timeout(timeout) ); ++ + if (!handle) handle = keyed_event; + if ((ULONG_PTR)key & 1) return STATUS_INVALID_PARAMETER_1; + if (alertable) flags |= SELECT_ALERTABLE; +@@ -1829,6 +2768,8 @@ NTSTATUS WINAPI NtReleaseKeyedEvent( HANDLE handle, const void *key, + select_op_t select_op; + UINT flags = SELECT_INTERRUPTIBLE; + ++ TRACE( "handle %p, key %p, alertable %u, timeout %s\n", handle, key, alertable, debugstr_timeout(timeout) ); ++ + if (!handle) handle = keyed_event; + if ((ULONG_PTR)key & 1) return STATUS_INVALID_PARAMETER_1; + if (alertable) flags |= SELECT_ALERTABLE; +diff --git a/dlls/ntdll/unix/thread.c b/dlls/ntdll/unix/thread.c +index 7f2f1b9..3f706f4 100644 +--- a/dlls/ntdll/unix/thread.c ++++ b/dlls/ntdll/unix/thread.c +@@ -1776,7 +1776,7 @@ NTSTATUS get_thread_context( HANDLE handle, void *context, BOOL *self, USHORT ma + + if (ret == STATUS_PENDING) + { +- NtWaitForSingleObject( context_handle, FALSE, NULL ); ++ server_wait_for_object( context_handle, FALSE, NULL ); + + SERVER_START_REQ( get_thread_context ) + { +diff --git a/dlls/ntdll/unix/unix_private.h b/dlls/ntdll/unix/unix_private.h +index 4e5b282..fc5b259 100644 +--- a/dlls/ntdll/unix/unix_private.h ++++ b/dlls/ntdll/unix/unix_private.h +@@ -93,7 +93,6 @@ struct ntdll_thread_data + { + void *cpu_data[16]; /* reserved for CPU-specific data */ + void *kernel_stack; /* stack for thread startup and kernel syscalls */ +- int esync_apc_fd; /* fd to wait on for user APCs */ + int request_fd; /* fd for sending server requests */ + int reply_fd; /* fd for receiving server replies */ + int wait_fd[2]; /* fd for sleeping server requests */ +@@ -102,6 +101,7 @@ struct ntdll_thread_data + PRTL_THREAD_START_ROUTINE start; /* thread entry point */ + void *param; /* thread entry point parameter */ + void *jmp_buf; /* setjmp buffer for exception handling */ ++ int fast_alert_obj; /* fd for the fast alert event */ + }; + + C_ASSERT( sizeof(struct ntdll_thread_data) <= sizeof(((TEB *)0)->GdiTebBatch) ); +@@ -196,6 +196,8 @@ extern NTSTATUS load_start_exe( WCHAR **image, void **module ); + extern ULONG_PTR redirect_arm64ec_rva( void *module, ULONG_PTR rva, const IMAGE_ARM64EC_METADATA *metadata ); + extern void start_server( BOOL debug ); + ++extern pthread_mutex_t fd_cache_mutex; ++ + extern unsigned int server_call_unlocked( void *req_ptr ); + extern void server_enter_uninterrupted_section( pthread_mutex_t *mutex, sigset_t *sigset ); + extern void server_leave_uninterrupted_section( pthread_mutex_t *mutex, sigset_t *sigset ); +@@ -203,6 +205,7 @@ extern unsigned int server_select( const select_op_t *select_op, data_size_t siz + timeout_t abs_timeout, context_t *context, user_apc_t *user_apc ); + extern unsigned int server_wait( const select_op_t *select_op, data_size_t size, UINT flags, + const LARGE_INTEGER *timeout ); ++extern unsigned int server_wait_for_object( HANDLE handle, BOOL alertable, const LARGE_INTEGER *timeout ); + extern unsigned int server_queue_process_apc( HANDLE process, const apc_call_t *call, + apc_result_t *result ); + extern int server_get_unix_fd( HANDLE handle, unsigned int wanted_access, int *unix_fd, +@@ -353,6 +356,8 @@ extern NTSTATUS 
wow64_wine_spawnvp( void *args ); + + extern void dbg_init(void); + ++extern void close_fast_sync_obj( HANDLE handle ); ++ + extern NTSTATUS call_user_apc_dispatcher( CONTEXT *context_ptr, ULONG_PTR arg1, ULONG_PTR arg2, ULONG_PTR arg3, + PNTAPCFUNC func, NTSTATUS status ); + extern NTSTATUS call_user_exception_dispatcher( EXCEPTION_RECORD *rec, CONTEXT *context ); +@@ -361,6 +366,7 @@ extern void call_raise_user_exception_dispatcher(void); + #define IMAGE_DLLCHARACTERISTICS_PREFER_NATIVE 0x0010 /* Wine extension */ + + #define TICKSPERSEC 10000000 ++#define NSECPERSEC 1000000000 + #define SECS_1601_TO_1970 ((369 * 365 + 89) * (ULONGLONG)86400) + + static inline ULONGLONG ticks_from_time_t( time_t time ) +@@ -419,7 +425,7 @@ static inline async_data_t server_async( HANDLE handle, struct async_fileio *use + + static inline NTSTATUS wait_async( HANDLE handle, BOOL alertable ) + { +- return NtWaitForSingleObject( handle, alertable, NULL ); ++ return server_wait_for_object( handle, alertable, NULL ); + } + + static inline BOOL in_wow64_call(void) +diff --git a/dlls/ntdll/unix/virtual.c b/dlls/ntdll/unix/virtual.c +index 1e1af8c..a54ac45 100644 +--- a/dlls/ntdll/unix/virtual.c ++++ b/dlls/ntdll/unix/virtual.c +@@ -3712,7 +3712,6 @@ static TEB *init_teb( void *ptr, BOOL is_wow ) + teb->StaticUnicodeString.Buffer = teb->StaticUnicodeBuffer; + teb->StaticUnicodeString.MaximumLength = sizeof(teb->StaticUnicodeBuffer); + thread_data = (struct ntdll_thread_data *)&teb->GdiTebBatch; +- thread_data->esync_apc_fd = -1; + thread_data->request_fd = -1; + thread_data->reply_fd = -1; + thread_data->wait_fd[0] = -1; +diff --git a/dlls/rpcrt4/rpc_server.c b/dlls/rpcrt4/rpc_server.c +index 02193b8..eb733f6 100644 +--- a/dlls/rpcrt4/rpc_server.c ++++ b/dlls/rpcrt4/rpc_server.c +@@ -701,6 +701,10 @@ static DWORD CALLBACK RPCRT4_server_thread(LPVOID the_arg) + } + LeaveCriticalSection(&cps->cs); + ++ EnterCriticalSection(&listen_cs); ++ CloseHandle(cps->server_thread); ++ cps->server_thread = NULL; ++ LeaveCriticalSection(&listen_cs); + TRACE("done\n"); + return 0; + } +@@ -1566,10 +1570,7 @@ RPC_STATUS WINAPI RpcMgmtWaitServerListen( void ) + LIST_FOR_EACH_ENTRY(protseq, &protseqs, RpcServerProtseq, entry) + { + if ((wait_thread = protseq->server_thread)) +- { +- protseq->server_thread = NULL; + break; +- } + } + LeaveCriticalSection(&server_cs); + if (!wait_thread) +@@ -1578,7 +1579,6 @@ RPC_STATUS WINAPI RpcMgmtWaitServerListen( void ) + TRACE("waiting for thread %lu\n", GetThreadId(wait_thread)); + LeaveCriticalSection(&listen_cs); + WaitForSingleObject(wait_thread, INFINITE); +- CloseHandle(wait_thread); + EnterCriticalSection(&listen_cs); + } + if (listen_done_event == event) +diff --git a/dlls/webservices/tests/channel.c b/dlls/webservices/tests/channel.c +index c64027f..ddcf896 100644 +--- a/dlls/webservices/tests/channel.c ++++ b/dlls/webservices/tests/channel.c +@@ -1214,6 +1214,9 @@ static const char send_record_begin[] = { + static const char send_record_middle[] = { 0x01, 0x56, 0x0e, 0x42 }; + static const char send_record_end[] = { 0x08, 0x02, 0x6e, 0x73, 0x89, 0xff, 0x01, 0x01 }; + ++#pragma GCC diagnostic ignored "-Warray-bounds" ++#pragma GCC diagnostic ignored "-Wstringop-overflow" ++ + static BOOL send_dict_str( int sock, char *addr, const char *str, int dict_str_count ) + { + char buf[512], dict_buf[256], body_buf[128], dict_size_buf[5]; +diff --git a/include/config.h.in b/include/config.h.in +index a53870f..ba0bdbd 100644 +--- a/include/config.h.in ++++ b/include/config.h.in +@@ -177,6 +177,9 
@@
+ /* Define to 1 if you have the <linux/major.h> header file. */
+ #undef HAVE_LINUX_MAJOR_H
+ 
++/* Define to 1 if you have the <linux/ntsync.h> header file. */
++#undef HAVE_LINUX_NTSYNC_H
++
+ /* Define to 1 if you have the <linux/param.h> header file. */
+ #undef HAVE_LINUX_PARAM_H
+ 
+@@ -522,9 +519,6 @@
+ /* Define to 1 if you have the <sys/epoll.h> header file. */
+ #undef HAVE_SYS_EPOLL_H
+ 
+-/* Define to 1 if you have the <sys/eventfd.h> header file. */
+-#undef HAVE_SYS_EVENTFD_H
+-
+ /* Define to 1 if you have the <sys/event.h> header file. */
+ #undef HAVE_SYS_EVENT_H
+ 
+diff --git a/include/wine/server_protocol.h b/include/wine/server_protocol.h
+index 34655d1..1f8e10a 100644
+--- a/include/wine/server_protocol.h
++++ b/include/wine/server_protocol.h
+@@ -5634,6 +5634,88 @@ struct get_next_thread_reply
+ };
+ 
+ 
++enum fast_sync_type
++{
++    FAST_SYNC_SEMAPHORE = 1,
++    FAST_SYNC_MUTEX,
++    FAST_SYNC_AUTO_EVENT,
++    FAST_SYNC_MANUAL_EVENT,
++    FAST_SYNC_AUTO_SERVER,
++    FAST_SYNC_MANUAL_SERVER,
++    FAST_SYNC_QUEUE,
++};
++
++
++
++struct get_linux_sync_device_request
++{
++    struct request_header __header;
++    char __pad_12[4];
++};
++struct get_linux_sync_device_reply
++{
++    struct reply_header __header;
++    obj_handle_t handle;
++    char __pad_12[4];
++};
++
++
++
++struct get_linux_sync_obj_request
++{
++    struct request_header __header;
++    obj_handle_t handle;
++};
++struct get_linux_sync_obj_reply
++{
++    struct reply_header __header;
++    obj_handle_t handle;
++    int type;
++    unsigned int access;
++    char __pad_20[4];
++};
++
++
++
++struct fast_select_queue_request
++{
++    struct request_header __header;
++    obj_handle_t handle;
++};
++struct fast_select_queue_reply
++{
++    struct reply_header __header;
++};
++
++
++
++struct fast_unselect_queue_request
++{
++    struct request_header __header;
++    obj_handle_t handle;
++    int signaled;
++    char __pad_20[4];
++};
++struct fast_unselect_queue_reply
++{
++    struct reply_header __header;
++};
++
++
++
++struct get_fast_alert_event_request
++{
++    struct request_header __header;
++    char __pad_12[4];
++};
++struct get_fast_alert_event_reply
++{
++    struct reply_header __header;
++    obj_handle_t handle;
++    char __pad_12[4];
++};
++
++
+ enum request
+ {
+     REQ_new_process,
+@@ -5921,12 +6003,12 @@ enum request
+     REQ_suspend_process,
+     REQ_resume_process,
+     REQ_get_next_thread,
+-    REQ_create_esync,
+-    REQ_open_esync,
+-    REQ_get_esync_fd,
+-    REQ_esync_msgwait,
++    REQ_get_linux_sync_device,
++    REQ_get_linux_sync_obj,
++    REQ_fast_select_queue,
++    REQ_fast_unselect_queue,
+     REQ_set_keyboard_repeat,
+-    REQ_get_esync_apc_fd,
++    REQ_get_fast_alert_event,
+     REQ_NB_REQUESTS
+ };
+ 
+@@ -6213,12 +6300,12 @@ union generic_request
+     struct suspend_process_request suspend_process_request;
+     struct resume_process_request resume_process_request;
+     struct get_next_thread_request get_next_thread_request;
+-    struct create_esync_request create_esync_request;
+-    struct open_esync_request open_esync_request;
+-    struct get_esync_fd_request get_esync_fd_request;
+-    struct esync_msgwait_request esync_msgwait_request;
++    struct get_linux_sync_device_request get_linux_sync_device_request;
++    struct get_linux_sync_obj_request get_linux_sync_obj_request;
++    struct fast_select_queue_request fast_select_queue_request;
++    struct fast_unselect_queue_request fast_unselect_queue_request;
+     struct set_keyboard_repeat_request set_keyboard_repeat_request;
+-    struct get_esync_apc_fd_request get_esync_apc_fd_request;
++    struct get_fast_alert_event_request get_fast_alert_event_request;
+ };
+ union generic_reply
+ {
+@@ -6503,12 +6595,12 @@
+     struct 
suspend_process_reply suspend_process_reply; + struct resume_process_reply resume_process_reply; + struct get_next_thread_reply get_next_thread_reply; +- struct create_esync_reply create_esync_reply; +- struct open_esync_reply open_esync_reply; +- struct get_esync_fd_reply get_esync_fd_reply; +- struct esync_msgwait_reply esync_msgwait_reply; ++ struct get_linux_sync_device_reply get_linux_sync_device_reply; ++ struct get_linux_sync_obj_reply get_linux_sync_obj_reply; ++ struct fast_select_queue_reply fast_select_queue_reply; ++ struct fast_unselect_queue_reply fast_unselect_queue_reply; + struct set_keyboard_repeat_reply set_keyboard_repeat_reply; +- struct get_esync_apc_fd_reply get_esync_apc_fd_reply; ++ struct get_fast_alert_event_reply get_fast_alert_event_reply; + }; + + /* ### protocol_version begin ### */ +diff --git a/server/Makefile.in b/server/Makefile.in +index b164193..b30df66 100644 +--- a/server/Makefile.in ++++ b/server/Makefile.in +@@ -11,8 +11,8 @@ SOURCES = \ + debugger.c \ + device.c \ + directory.c \ +- esync.c \ + event.c \ ++ fast_sync.c \ + fd.c \ + file.c \ + handle.c \ +diff --git a/server/async.c b/server/async.c +index b0f9fe4..02fb966 100644 +--- a/server/async.c ++++ b/server/async.c +@@ -77,7 +77,6 @@ static const struct object_ops async_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + async_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + async_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -90,6 +89,7 @@ static const struct object_ops async_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + async_destroy /* destroy */ + }; +@@ -687,7 +687,6 @@ static const struct object_ops iosb_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -700,6 +699,7 @@ static const struct object_ops iosb_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + iosb_destroy /* destroy */ + }; +diff --git a/server/atom.c b/server/atom.c +index d9824de..ba320c4 100644 +--- a/server/atom.c ++++ b/server/atom.c +@@ -79,7 +79,6 @@ static const struct object_ops atom_table_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -92,6 +91,7 @@ static const struct object_ops atom_table_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + atom_table_destroy /* destroy */ + }; +diff --git a/server/change.c b/server/change.c +index ec61711..dafa7e8 100644 +--- a/server/change.c ++++ b/server/change.c +@@ -112,7 +112,6 @@ static const struct object_ops dir_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ +- default_fd_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + dir_get_fd, /* get_fd */ +@@ -125,6 +124,7 @@ static const struct object_ops dir_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* 
get_kernel_obj_list */ ++ default_fd_get_fast_sync, /* get_fast_sync */ + dir_close_handle, /* close_handle */ + dir_destroy /* destroy */ + }; +diff --git a/server/clipboard.c b/server/clipboard.c +index 8b265f2..de9f84f 100644 +--- a/server/clipboard.c ++++ b/server/clipboard.c +@@ -76,7 +76,6 @@ static const struct object_ops clipboard_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -89,6 +88,7 @@ static const struct object_ops clipboard_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + clipboard_destroy /* destroy */ + }; +diff --git a/server/completion.c b/server/completion.c +index 3d4be86..5ec6d20 100644 +--- a/server/completion.c ++++ b/server/completion.c +@@ -61,10 +61,12 @@ struct completion + struct object obj; + struct list queue; + unsigned int depth; ++ struct fast_sync *fast_sync; + }; + + static void completion_dump( struct object*, int ); + static int completion_signaled( struct object *obj, struct wait_queue_entry *entry ); ++static struct fast_sync *completion_get_fast_sync( struct object *obj ); + static void completion_destroy( struct object * ); + + static const struct object_ops completion_ops = +@@ -75,7 +77,6 @@ static const struct object_ops completion_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + completion_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -88,6 +89,7 @@ static const struct object_ops completion_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ completion_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + completion_destroy /* destroy */ + }; +@@ -110,6 +112,7 @@ static void completion_destroy( struct object *obj) + { + free( tmp ); + } ++ if (completion->fast_sync) release_object( completion->fast_sync ); + } + + static void completion_dump( struct object *obj, int verbose ) +@@ -127,6 +130,16 @@ static int completion_signaled( struct object *obj, struct wait_queue_entry *ent + return !list_empty( &completion->queue ); + } + ++static struct fast_sync *completion_get_fast_sync( struct object *obj ) ++{ ++ struct completion *completion = (struct completion *)obj; ++ ++ if (!completion->fast_sync) ++ completion->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, !list_empty( &completion->queue ) ); ++ if (completion->fast_sync) grab_object( completion->fast_sync ); ++ return completion->fast_sync; ++} ++ + static struct completion *create_completion( struct object *root, const struct unicode_str *name, + unsigned int attr, unsigned int concurrent, + const struct security_descriptor *sd ) +@@ -139,6 +152,7 @@ static struct completion *create_completion( struct object *root, const struct u + { + list_init( &completion->queue ); + completion->depth = 0; ++ completion->fast_sync = NULL; + } + } + +@@ -166,6 +180,7 @@ void add_completion( struct completion *completion, apc_param_t ckey, apc_param_ + list_add_tail( &completion->queue, &msg->queue_entry ); + completion->depth++; + wake_up( &completion->obj, 1 ); ++ fast_set_event( completion->fast_sync ); + } + + /* create a completion */ +@@ -232,6 +247,8 @@ DECL_HANDLER(remove_completion) + 
reply->status = msg->status; + reply->information = msg->information; + free( msg ); ++ if (list_empty( &completion->queue )) ++ fast_reset_event( completion->fast_sync ); + } + + release_object( completion ); +diff --git a/server/console.c b/server/console.c +index dbd4a97..17708df 100644 +--- a/server/console.c ++++ b/server/console.c +@@ -41,7 +41,6 @@ + #include "wincon.h" + #include "winternl.h" + #include "wine/condrv.h" +-#include "esync.h" + + struct screen_buffer; + +@@ -62,6 +61,7 @@ struct console + struct fd *fd; /* for bare console, attached input fd */ + struct async_queue ioctl_q; /* ioctl queue */ + struct async_queue read_q; /* read queue */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + static void console_dump( struct object *obj, int verbose ); +@@ -73,6 +73,7 @@ static struct object *console_lookup_name( struct object *obj, struct unicode_st + static struct object *console_open_file( struct object *obj, unsigned int access, + unsigned int sharing, unsigned int options ); + static int console_add_queue( struct object *obj, struct wait_queue_entry *entry ); ++static struct fast_sync *console_get_fast_sync( struct object *obj ); + + static const struct object_ops console_ops = + { +@@ -82,7 +83,6 @@ static const struct object_ops console_ops = + console_add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + console_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + console_get_fd, /* get_fd */ +@@ -95,6 +95,7 @@ static const struct object_ops console_ops = + NULL, /* unlink_name */ + console_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ console_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + console_destroy /* destroy */ + }; +@@ -132,27 +133,27 @@ struct console_host_ioctl + + struct console_server + { +- struct object obj; /* object header */ +- struct fd *fd; /* pseudo-fd for ioctls */ +- struct console *console; /* attached console */ +- struct list queue; /* ioctl queue */ +- struct list read_queue; /* blocking read queue */ ++ struct object obj; /* object header */ ++ struct fd *fd; /* pseudo-fd for ioctls */ ++ struct console *console; /* attached console */ ++ struct list queue; /* ioctl queue */ ++ struct list read_queue; /* blocking read queue */ + unsigned int busy : 1; /* flag if server processing an ioctl */ + unsigned int once_input : 1; /* flag if input thread has already been requested */ +- int term_fd; /* UNIX terminal fd */ +- struct termios termios; /* original termios */ +- int esync_fd; ++ int term_fd; /* UNIX terminal fd */ ++ struct termios termios; /* original termios */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + static void console_server_dump( struct object *obj, int verbose ); + static void console_server_destroy( struct object *obj ); + static int console_server_signaled( struct object *obj, struct wait_queue_entry *entry ); +-static int console_server_get_esync_fd( struct object *obj, enum esync_type *type ); + static struct fd *console_server_get_fd( struct object *obj ); + static struct object *console_server_lookup_name( struct object *obj, struct unicode_str *name, + unsigned int attr, struct object *root ); + static struct object *console_server_open_file( struct object *obj, unsigned int access, + unsigned int sharing, unsigned int options ); ++static struct fast_sync *console_server_get_fast_sync( struct object *obj ); + + static const struct 
object_ops console_server_ops = + { +@@ -162,7 +163,6 @@ static const struct object_ops console_server_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + console_server_signaled, /* signaled */ +- console_server_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + console_server_get_fd, /* get_fd */ +@@ -175,6 +175,7 @@ static const struct object_ops console_server_ops = + NULL, /* unlink_name */ + console_server_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ console_server_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + console_server_destroy /* destroy */ + }; +@@ -223,6 +224,7 @@ static int screen_buffer_add_queue( struct object *obj, struct wait_queue_entry + static struct fd *screen_buffer_get_fd( struct object *obj ); + static struct object *screen_buffer_open_file( struct object *obj, unsigned int access, + unsigned int sharing, unsigned int options ); ++static struct fast_sync *screen_buffer_get_fast_sync( struct object *obj ); + + static const struct object_ops screen_buffer_ops = + { +@@ -232,7 +234,6 @@ static const struct object_ops screen_buffer_ops = + screen_buffer_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + screen_buffer_get_fd, /* get_fd */ +@@ -245,6 +246,7 @@ static const struct object_ops screen_buffer_ops = + NULL, /* unlink_name */ + screen_buffer_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ screen_buffer_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + screen_buffer_destroy /* destroy */ + }; +@@ -282,7 +284,6 @@ static const struct object_ops console_device_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -295,6 +296,7 @@ static const struct object_ops console_device_ops = + default_unlink_name, /* unlink_name */ + console_device_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + no_destroy /* destroy */ + }; +@@ -310,6 +312,7 @@ static struct object *console_input_open_file( struct object *obj, unsigned int + unsigned int sharing, unsigned int options ); + static int console_input_add_queue( struct object *obj, struct wait_queue_entry *entry ); + static struct fd *console_input_get_fd( struct object *obj ); ++static struct fast_sync *console_input_get_fast_sync( struct object *obj ); + static void console_input_destroy( struct object *obj ); + + static const struct object_ops console_input_ops = +@@ -320,7 +323,6 @@ static const struct object_ops console_input_ops = + console_input_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + console_input_get_fd, /* get_fd */ +@@ -333,6 +335,7 @@ static const struct object_ops console_input_ops = + default_unlink_name, /* unlink_name */ + console_input_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ console_input_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + console_input_destroy /* destroy */ + }; +@@ -368,6 +371,7 @@ static int console_output_add_queue( struct object *obj, struct wait_queue_entry + static struct 
fd *console_output_get_fd( struct object *obj ); + static struct object *console_output_open_file( struct object *obj, unsigned int access, + unsigned int sharing, unsigned int options ); ++static struct fast_sync *console_output_get_fast_sync( struct object *obj ); + static void console_output_destroy( struct object *obj ); + + static const struct object_ops console_output_ops = +@@ -378,7 +382,6 @@ static const struct object_ops console_output_ops = + console_output_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + console_output_get_fd, /* get_fd */ +@@ -391,6 +394,7 @@ static const struct object_ops console_output_ops = + default_unlink_name, /* unlink_name */ + console_output_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ console_output_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + console_output_destroy /* destroy */ + }; +@@ -437,7 +441,6 @@ static const struct object_ops console_connection_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + console_connection_get_fd, /* get_fd */ +@@ -450,6 +453,7 @@ static const struct object_ops console_connection_ops = + default_unlink_name, /* unlink_name */ + console_connection_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + console_connection_close_handle, /* close_handle */ + console_connection_destroy /* destroy */ + }; +@@ -549,6 +553,7 @@ static struct object *create_console(void) + console->server = NULL; + console->fd = NULL; + console->last_id = 0; ++ console->fast_sync = NULL; + init_async_queue( &console->ioctl_q ); + init_async_queue( &console->read_q ); + +@@ -588,6 +593,7 @@ static int queue_host_ioctl( struct console_server *server, unsigned int code, u + } + list_add_tail( &server->queue, &ioctl->entry ); + wake_up( &server->obj, 0 ); ++ fast_set_event( server->fast_sync ); + if (async) set_error( STATUS_PENDING ); + return 1; + } +@@ -600,8 +606,6 @@ static void disconnect_console_server( struct console_server *server ) + list_remove( &call->entry ); + console_host_ioctl_terminate( call, STATUS_CANCELLED ); + } +- if (do_esync()) +- esync_clear( server->esync_fd ); + while (!list_empty( &server->read_queue )) + { + struct console_host_ioctl *call = LIST_ENTRY( list_head( &server->read_queue ), struct console_host_ioctl, entry ); +@@ -622,6 +626,7 @@ static void disconnect_console_server( struct console_server *server ) + server->console->server = NULL; + server->console = NULL; + wake_up( &server->obj, 0 ); ++ fast_set_event( server->fast_sync ); + } + } + +@@ -776,6 +781,8 @@ static void console_destroy( struct object *obj ) + free_async_queue( &console->read_q ); + if (console->fd) + release_object( console->fd ); ++ ++ if (console->fast_sync) release_object( console->fast_sync ); + } + + static struct object *create_console_connection( struct console *console ) +@@ -823,6 +830,16 @@ static struct object *console_open_file( struct object *obj, unsigned int access + return grab_object( obj ); + } + ++static struct fast_sync *console_get_fast_sync( struct object *obj ) ++{ ++ struct console *console = (struct console *)obj; ++ ++ if (!console->fast_sync) ++ console->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, console->signaled ); ++ if 
(console->fast_sync) grab_object( console->fast_sync );
++ return console->fast_sync;
++}
++
+ static void screen_buffer_dump( struct object *obj, int verbose )
+ {
+ struct screen_buffer *screen_buffer = (struct screen_buffer *)obj;
+@@ -872,6 +889,17 @@ static struct fd *screen_buffer_get_fd( struct object *obj )
+ return NULL;
+ }
+
++static struct fast_sync *screen_buffer_get_fast_sync( struct object *obj )
++{
++ struct screen_buffer *screen_buffer = (struct screen_buffer *)obj;
++ if (!screen_buffer->input)
++ {
++ set_error( STATUS_ACCESS_DENIED );
++ return NULL;
++ }
++ return console_get_fast_sync( &screen_buffer->input->obj );
++}
++
+ static void console_server_dump( struct object *obj, int verbose )
+ {
+ assert( obj->ops == &console_server_ops );
+@@ -884,7 +912,7 @@ static void console_server_destroy( struct object *obj )
+ assert( obj->ops == &console_server_ops );
+ disconnect_console_server( server );
+ if (server->fd) release_object( server->fd );
+- if (do_esync()) close( server->esync_fd );
++ if (server->fast_sync) release_object( server->fast_sync );
+ }
+
+ static struct object *console_server_lookup_name( struct object *obj, struct unicode_str *name,
+@@ -926,13 +954,6 @@ static int console_server_signaled( struct object *obj, struct wait_queue_entry
+ return !server->console || !list_empty( &server->queue );
+ }
+
+-static int console_server_get_esync_fd( struct object *obj, enum esync_type *type )
+-{
+- struct console_server *server = (struct console_server*)obj;
+- *type = ESYNC_MANUAL_SERVER;
+- return server->esync_fd;
+-}
+-
+ static struct fd *console_server_get_fd( struct object* obj )
+ {
+ struct console_server *server = (struct console_server*)obj;
+@@ -946,6 +967,17 @@ static struct object *console_server_open_file( struct object *obj, unsigned int
+ return grab_object( obj );
+ }
+
++static struct fast_sync *console_server_get_fast_sync( struct object *obj )
++{
++ struct console_server *server = (struct console_server *)obj;
++ int signaled = !server->console || !list_empty( &server->queue );
++
++ if (!server->fast_sync)
++ server->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, signaled );
++ if (server->fast_sync) grab_object( server->fast_sync );
++ return server->fast_sync;
++}
++
+ static struct object *create_console_server( void )
+ {
+ struct console_server *server;
+@@ -957,6 +989,7 @@ static struct object *create_console_server( void )
+ server->term_fd = -1;
+ list_init( &server->queue );
+ list_init( &server->read_queue );
++ server->fast_sync = NULL;
+ server->fd = alloc_pseudo_fd( &console_server_fd_ops, &server->obj, FILE_SYNCHRONOUS_IO_NONALERT );
+ if (!server->fd)
+ {
+@@ -964,10 +997,6 @@
+ return NULL;
+ }
+ allow_fd_caching(server->fd);
+- server->esync_fd = -1;
+-
+- if (do_esync())
+- server->esync_fd = esync_create_fd( 0, 0 );
+
+ return &server->obj;
+ }
+@@ -1426,6 +1455,16 @@ static struct object *console_input_open_file( struct object *obj, unsigned int
+ return grab_object( obj );
+ }
+
++static struct fast_sync *console_input_get_fast_sync( struct object *obj )
++{
++ if (!current->process->console)
++ {
++ set_error( STATUS_ACCESS_DENIED );
++ return NULL;
++ }
++ return console_get_fast_sync( &current->process->console->obj );
++}
++
+ static void console_input_destroy( struct object *obj )
+ {
+ struct console_input *console_input = (struct console_input *)obj;
+@@ -1498,6 +1537,16 @@ static struct object *console_output_open_file( struct object *obj, unsigned int
+ return grab_object( obj );
+ }
+
++static struct fast_sync *console_output_get_fast_sync( struct object *obj )
++{
++ if (!current->process->console || !current->process->console->active)
++ {
++ set_error( STATUS_ACCESS_DENIED );
++ return NULL;
++ }
++ return console_get_fast_sync( &current->process->console->obj );
++}
++
+ static void console_output_destroy( struct object *obj )
+ {
+ struct console_output *console_output = (struct console_output *)obj;
+@@ -1555,11 +1604,16 @@ DECL_HANDLER(get_next_console_request)
+
+ if (!server->console->renderer) server->console->renderer = current;
+
+- if (!req->signal) server->console->signaled = 0;
++ if (!req->signal)
++ {
++ server->console->signaled = 0;
++ fast_reset_event( server->console->fast_sync );
++ }
+ else if (!server->console->signaled)
+ {
+ server->console->signaled = 1;
+ wake_up( &server->console->obj, 0 );
++ fast_set_event( server->console->fast_sync );
+ }
+
+ if (req->read)
+@@ -1581,8 +1635,8 @@ DECL_HANDLER(get_next_console_request)
+ /* set result of previous ioctl */
+ ioctl = LIST_ENTRY( list_head( &server->queue ), struct console_host_ioctl, entry );
+ list_remove( &ioctl->entry );
+- if (do_esync() && list_empty( &server->queue ))
+- esync_clear( server->esync_fd );
++ if (list_empty( &server->queue ))
++ fast_reset_event( server->fast_sync );
+ }
+
+ if (ioctl)
+@@ -1668,8 +1722,9 @@ DECL_HANDLER(get_next_console_request)
+ {
+ set_error( STATUS_PENDING );
+ }
+- if (do_esync() && list_empty( &server->queue ))
+- esync_clear( server->esync_fd );
++
++ if (list_empty( &server->queue ))
++ fast_reset_event( server->fast_sync );
+
+ release_object( server );
+ }
+diff --git a/server/debugger.c b/server/debugger.c
+index ca04d4c..7975fc4 100644
+--- a/server/debugger.c
++++ b/server/debugger.c
+@@ -71,6 +71,7 @@ struct debug_obj
+ struct object obj; /* object header */
+ struct list event_queue; /* pending events queue */
+ unsigned int flags; /* debug flags */
++ struct fast_sync *fast_sync; /* fast synchronization object */
+ };
+
+
+@@ -86,7 +87,6 @@ static const struct object_ops debug_event_ops =
+ add_queue, /* add_queue */
+ remove_queue, /* remove_queue */
+ debug_event_signaled, /* signaled */
+- NULL, /* get_esync_fd */
+ no_satisfied, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -99,12 +99,14 @@ static const struct object_ops debug_event_ops =
+ NULL, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ no_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ debug_event_destroy /* destroy */
+ };
+
+ static void debug_obj_dump( struct object *obj, int verbose );
+ static int debug_obj_signaled( struct object *obj, struct wait_queue_entry *entry );
++static struct fast_sync *debug_obj_get_fast_sync( struct object *obj );
+ static void debug_obj_destroy( struct object *obj );
+
+ static const struct object_ops debug_obj_ops =
+@@ -115,7 +117,6 @@ static const struct object_ops debug_obj_ops =
+ add_queue, /* add_queue */
+ remove_queue, /* remove_queue */
+ debug_obj_signaled, /* signaled */
+- NULL, /* get_esync_fd */
+ no_satisfied, /* satisfied */
+ no_signal, /* signal */
+ no_get_fd, /* get_fd */
+@@ -128,6 +129,7 @@ static const struct object_ops debug_obj_ops =
+ default_unlink_name, /* unlink_name */
+ no_open_file, /* open_file */
+ no_kernel_obj_list, /* get_kernel_obj_list */
++ debug_obj_get_fast_sync, /* get_fast_sync */
+ no_close_handle, /* close_handle */
+ debug_obj_destroy /* destroy */
+ };
+@@ 
-255,6 +257,7 @@ static void link_event( struct debug_obj *debug_obj, struct debug_event *event ) + /* grab reference since debugger could be killed while trying to wake up */ + grab_object( debug_obj ); + wake_up( &debug_obj->obj, 0 ); ++ fast_set_event( debug_obj->fast_sync ); + release_object( debug_obj ); + } + } +@@ -267,6 +270,7 @@ static void resume_event( struct debug_obj *debug_obj, struct debug_event *event + { + grab_object( debug_obj ); + wake_up( &debug_obj->obj, 0 ); ++ fast_set_event( debug_obj->fast_sync ); + release_object( debug_obj ); + } + } +@@ -332,6 +336,17 @@ static int debug_obj_signaled( struct object *obj, struct wait_queue_entry *entr + return find_event_to_send( debug_obj ) != NULL; + } + ++static struct fast_sync *debug_obj_get_fast_sync( struct object *obj ) ++{ ++ struct debug_obj *debug_obj = (struct debug_obj *)obj; ++ int signaled = find_event_to_send( debug_obj ) != NULL; ++ ++ if (!debug_obj->fast_sync) ++ debug_obj->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, signaled ); ++ if (debug_obj->fast_sync) grab_object( debug_obj->fast_sync ); ++ return debug_obj->fast_sync; ++} ++ + static void debug_obj_destroy( struct object *obj ) + { + struct list *ptr; +@@ -344,6 +359,8 @@ static void debug_obj_destroy( struct object *obj ) + /* free all pending events */ + while ((ptr = list_head( &debug_obj->event_queue ))) + unlink_event( debug_obj, LIST_ENTRY( ptr, struct debug_event, entry )); ++ ++ if (debug_obj->fast_sync) release_object( debug_obj->fast_sync ); + } + + struct debug_obj *get_debug_obj( struct process *process, obj_handle_t handle, unsigned int access ) +@@ -363,6 +380,7 @@ static struct debug_obj *create_debug_obj( struct object *root, const struct uni + { + debug_obj->flags = flags; + list_init( &debug_obj->event_queue ); ++ debug_obj->fast_sync = NULL; + } + } + return debug_obj; +@@ -571,6 +589,9 @@ DECL_HANDLER(wait_debug_event) + reply->tid = get_thread_id( event->sender ); + alloc_event_handles( event, current->process ); + set_reply_data( &event->data, min( get_reply_max_size(), sizeof(event->data) )); ++ ++ if (!find_event_to_send( debug_obj )) ++ fast_reset_event( debug_obj->fast_sync ); + } + else + { +diff --git a/server/device.c b/server/device.c +index c45d010..698fee6 100644 +--- a/server/device.c ++++ b/server/device.c +@@ -38,7 +38,6 @@ + #include "handle.h" + #include "request.h" + #include "process.h" +-#include "esync.h" + + /* IRP object */ + +@@ -67,7 +66,6 @@ static const struct object_ops irp_call_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -80,6 +78,7 @@ static const struct object_ops irp_call_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + irp_call_destroy /* destroy */ + }; +@@ -94,12 +93,12 @@ struct device_manager + struct list requests; /* list of pending irps across all devices */ + struct irp_call *current_call; /* call currently executed on client side */ + struct wine_rb_tree kernel_objects; /* map of objects that have client side pointer associated */ +- int esync_fd; /* esync file descriptor */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + static void device_manager_dump( struct object *obj, int verbose ); + static int device_manager_signaled( struct object *obj, struct 
wait_queue_entry *entry ); +-static int device_manager_get_esync_fd( struct object *obj, enum esync_type *type ); ++static struct fast_sync *device_manager_get_fast_sync( struct object *obj ); + static void device_manager_destroy( struct object *obj ); + + static const struct object_ops device_manager_ops = +@@ -110,7 +109,6 @@ static const struct object_ops device_manager_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + device_manager_signaled, /* signaled */ +- device_manager_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -123,6 +121,7 @@ static const struct object_ops device_manager_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ device_manager_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + device_manager_destroy /* destroy */ + }; +@@ -168,7 +167,6 @@ static const struct object_ops device_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -181,6 +179,7 @@ static const struct object_ops device_ops = + default_unlink_name, /* unlink_name */ + device_open_file, /* open_file */ + device_get_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + device_destroy /* destroy */ + }; +@@ -221,7 +220,6 @@ static const struct object_ops device_file_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + device_file_get_fd, /* get_fd */ +@@ -234,6 +232,7 @@ static const struct object_ops device_file_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + device_file_get_kernel_obj_list, /* get_kernel_obj_list */ ++ default_fd_get_fast_sync, /* get_fast_sync */ + device_file_close_handle, /* close_handle */ + device_file_destroy /* destroy */ + }; +@@ -424,7 +423,12 @@ static void add_irp_to_queue( struct device_manager *manager, struct irp_call *i + irp->thread = thread ? 
(struct thread *)grab_object( thread ) : NULL; + if (irp->file) list_add_tail( &irp->file->requests, &irp->dev_entry ); + list_add_tail( &manager->requests, &irp->mgr_entry ); +- if (list_head( &manager->requests ) == &irp->mgr_entry) wake_up( &manager->obj, 0 ); /* first one */ ++ if (list_head( &manager->requests ) == &irp->mgr_entry) ++ { ++ /* first one */ ++ wake_up( &manager->obj, 0 ); ++ fast_set_event( manager->fast_sync ); ++ } + } + + static struct object *device_open_file( struct object *obj, unsigned int access, +@@ -754,13 +758,13 @@ static void delete_file( struct device_file *file ) + /* terminate all pending requests */ + LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry ) + { +- if (do_esync() && file->device->manager && list_empty( &file->device->manager->requests )) +- esync_clear( file->device->manager->esync_fd ); +- + list_remove( &irp->mgr_entry ); + set_irp_result( irp, STATUS_FILE_DELETED, NULL, 0, 0 ); + } + ++ if (list_empty( &file->device->manager->requests )) ++ fast_reset_event( file->device->manager->fast_sync ); ++ + release_object( file ); + } + +@@ -792,11 +796,14 @@ static int device_manager_signaled( struct object *obj, struct wait_queue_entry + return !list_empty( &manager->requests ); + } + +-static int device_manager_get_esync_fd( struct object *obj, enum esync_type *type ) ++static struct fast_sync *device_manager_get_fast_sync( struct object *obj ) + { + struct device_manager *manager = (struct device_manager *)obj; +- *type = ESYNC_MANUAL_SERVER; +- return manager->esync_fd; ++ ++ if (!manager->fast_sync) ++ manager->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, !list_empty( &manager->requests ) ); ++ if (manager->fast_sync) grab_object( manager->fast_sync ); ++ return manager->fast_sync; + } + + static void device_manager_destroy( struct object *obj ) +@@ -834,8 +841,7 @@ static void device_manager_destroy( struct object *obj ) + release_object( irp ); + } + +- if (do_esync()) +- close( manager->esync_fd ); ++ if (manager->fast_sync) release_object( manager->fast_sync ); + } + + static struct device_manager *create_device_manager(void) +@@ -845,12 +851,10 @@ static struct device_manager *create_device_manager(void) + if ((manager = alloc_object( &device_manager_ops ))) + { + manager->current_call = NULL; ++ manager->fast_sync = NULL; + list_init( &manager->devices ); + list_init( &manager->requests ); + wine_rb_init( &manager->kernel_objects, compare_kernel_object ); +- +- if (do_esync()) +- manager->esync_fd = esync_create_fd( 0, 0 ); + } + return manager; + } +@@ -1037,12 +1041,13 @@ DECL_HANDLER(get_next_device_request) + } + list_remove( &irp->mgr_entry ); + list_init( &irp->mgr_entry ); ++ ++ if (list_empty( &manager->requests )) ++ fast_reset_event( manager->fast_sync ); ++ + /* we already own the object if it's only on manager queue */ + if (irp->file) grab_object( irp ); + manager->current_call = irp; +- +- if (do_esync() && list_empty( &manager->requests )) +- esync_clear( manager->esync_fd ); + } + else close_handle( current->process, reply->next ); + } +diff --git a/server/directory.c b/server/directory.c +index bc161b9..8e32abb 100644 +--- a/server/directory.c ++++ b/server/directory.c +@@ -69,7 +69,6 @@ static const struct object_ops object_type_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -82,6 +81,7 @@ static const struct object_ops 
object_type_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + no_destroy /* destroy */ + }; +@@ -120,7 +120,6 @@ static const struct object_ops directory_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -133,6 +132,7 @@ static const struct object_ops directory_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + directory_destroy /* destroy */ + }; +diff --git a/server/esync.c b/server/esync.c +deleted file mode 100644 +index e193f61..0000000 +--- a/server/esync.c ++++ /dev/null +@@ -1,588 +0,0 @@ +-/* +- * eventfd-based synchronization objects +- * +- * Copyright (C) 2018 Zebediah Figura +- * +- * This library is free software; you can redistribute it and/or +- * modify it under the terms of the GNU Lesser General Public +- * License as published by the Free Software Foundation; either +- * version 2.1 of the License, or (at your option) any later version. +- * +- * This library is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * Lesser General Public License for more details. +- * +- * You should have received a copy of the GNU Lesser General Public +- * License along with this library; if not, write to the Free Software +- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA +- */ +- +-#include "config.h" +- +- +-#include +-#include +-#include +-#ifdef HAVE_SYS_EVENTFD_H +-# include +-#endif +-#include +-#ifdef HAVE_SYS_STAT_H +-# include +-#endif +-#include +- +-#include "ntstatus.h" +-#define WIN32_NO_STATUS +-#include "windef.h" +-#include "winternl.h" +- +-#include "handle.h" +-#include "request.h" +-#include "file.h" +-#include "esync.h" +- +-int do_esync(void) +-{ +-#ifdef HAVE_SYS_EVENTFD_H +- static int do_esync_cached = -1; +- +- if (do_esync_cached == -1) +- do_esync_cached = getenv("WINEESYNC") && atoi(getenv("WINEESYNC")); +- +- return do_esync_cached; +-#else +- return 0; +-#endif +-} +- +-static char shm_name[29]; +-static int shm_fd; +-static off_t shm_size; +-static void **shm_addrs; +-static int shm_addrs_size; /* length of the allocated shm_addrs array */ +-static long pagesize; +- +-static void shm_cleanup(void) +-{ +- close( shm_fd ); +- if (shm_unlink( shm_name ) == -1) +- perror( "shm_unlink" ); +-} +- +-void esync_init(void) +-{ +- struct stat st; +- +- if (fstat( config_dir_fd, &st ) == -1) +- fatal_error( "cannot stat config dir\n" ); +- +- if (st.st_ino != (unsigned long)st.st_ino) +- sprintf( shm_name, "/wine-%lx%08lx-esync", (unsigned long)((unsigned long long)st.st_ino >> 32), (unsigned long)st.st_ino ); +- else +- sprintf( shm_name, "/wine-%lx-esync", (unsigned long)st.st_ino ); +- +- shm_unlink( shm_name ); +- +- shm_fd = shm_open( shm_name, O_RDWR | O_CREAT | O_EXCL, 0644 ); +- if (shm_fd == -1) +- perror( "shm_open" ); +- +- pagesize = sysconf( _SC_PAGESIZE ); +- +- shm_addrs = calloc( 128, sizeof(shm_addrs[0]) ); +- shm_addrs_size = 128; +- +- shm_size = pagesize; +- if (ftruncate( shm_fd, shm_size ) == -1) +- perror( "ftruncate" ); +- +- 
fprintf( stderr, "esync: up and running.\n" ); +- +- atexit( shm_cleanup ); +-} +- +-static struct list mutex_list = LIST_INIT(mutex_list); +- +-struct esync +-{ +- struct object obj; /* object header */ +- int fd; /* eventfd file descriptor */ +- enum esync_type type; +- unsigned int shm_idx; /* index into the shared memory section */ +- struct list mutex_entry; /* entry in the mutex list (if applicable) */ +-}; +- +-static void esync_dump( struct object *obj, int verbose ); +-static int esync_get_esync_fd( struct object *obj, enum esync_type *type ); +-static unsigned int esync_map_access( struct object *obj, unsigned int access ); +-static void esync_destroy( struct object *obj ); +- +-const struct object_ops esync_ops = +-{ +- sizeof(struct esync), /* size */ +- &no_type, /* type */ +- esync_dump, /* dump */ +- no_add_queue, /* add_queue */ +- NULL, /* remove_queue */ +- NULL, /* signaled */ +- esync_get_esync_fd, /* get_esync_fd */ +- NULL, /* satisfied */ +- no_signal, /* signal */ +- no_get_fd, /* get_fd */ +- esync_map_access, /* map_access */ +- default_get_sd, /* get_sd */ +- default_set_sd, /* set_sd */ +- default_get_full_name, /* get_full_name */ +- no_lookup_name, /* lookup_name */ +- directory_link_name, /* link_name */ +- default_unlink_name, /* unlink_name */ +- no_open_file, /* open_file */ +- no_kernel_obj_list, /* get_kernel_obj_list */ +- no_close_handle, /* close_handle */ +- esync_destroy /* destroy */ +-}; +- +-static void esync_dump( struct object *obj, int verbose ) +-{ +- struct esync *esync = (struct esync *)obj; +- assert( obj->ops == &esync_ops ); +- fprintf( stderr, "esync fd=%d\n", esync->fd ); +-} +- +-static int esync_get_esync_fd( struct object *obj, enum esync_type *type ) +-{ +- struct esync *esync = (struct esync *)obj; +- *type = esync->type; +- return esync->fd; +-} +- +-static unsigned int esync_map_access( struct object *obj, unsigned int access ) +-{ +- /* Sync objects have the same flags. 
*/ +- if (access & GENERIC_READ) access |= STANDARD_RIGHTS_READ | EVENT_QUERY_STATE; +- if (access & GENERIC_WRITE) access |= STANDARD_RIGHTS_WRITE | EVENT_MODIFY_STATE; +- if (access & GENERIC_EXECUTE) access |= STANDARD_RIGHTS_EXECUTE | SYNCHRONIZE; +- if (access & GENERIC_ALL) access |= STANDARD_RIGHTS_ALL | EVENT_QUERY_STATE | EVENT_MODIFY_STATE; +- return access & ~(GENERIC_READ | GENERIC_WRITE | GENERIC_EXECUTE | GENERIC_ALL); +-} +- +-static void esync_destroy( struct object *obj ) +-{ +- struct esync *esync = (struct esync *)obj; +- if (esync->type == ESYNC_MUTEX) +- list_remove( &esync->mutex_entry ); +- close( esync->fd ); +-} +- +-static int type_matches( enum esync_type type1, enum esync_type type2 ) +-{ +- return (type1 == type2) || +- ((type1 == ESYNC_AUTO_EVENT || type1 == ESYNC_MANUAL_EVENT) && +- (type2 == ESYNC_AUTO_EVENT || type2 == ESYNC_MANUAL_EVENT)); +-} +- +-static void *get_shm( unsigned int idx ) +-{ +- int entry = (idx * 8) / pagesize; +- int offset = (idx * 8) % pagesize; +- +- if (entry >= shm_addrs_size) +- { +- int new_size = max(shm_addrs_size * 2, entry + 1); +- +- if (!(shm_addrs = realloc( shm_addrs, new_size * sizeof(shm_addrs[0]) ))) +- fprintf( stderr, "esync: couldn't expand shm_addrs array to size %d\n", entry + 1 ); +- +- memset( shm_addrs + shm_addrs_size, 0, (new_size - shm_addrs_size) * sizeof(shm_addrs[0]) ); +- +- shm_addrs_size = new_size; +- } +- +- if (!shm_addrs[entry]) +- { +- void *addr = mmap( NULL, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, entry * pagesize ); +- if (addr == (void *)-1) +- { +- fprintf( stderr, "esync: failed to map page %d (offset %#lx): ", entry, entry * pagesize ); +- perror( "mmap" ); +- } +- +- if (debug_level) +- fprintf( stderr, "esync: Mapping page %d at %p.\n", entry, addr ); +- +- if (__sync_val_compare_and_swap( &shm_addrs[entry], 0, addr )) +- munmap( addr, pagesize ); /* someone beat us to it */ +- } +- +- return (void *)((unsigned long)shm_addrs[entry] + offset); +-} +- +-struct semaphore +-{ +- int max; +- int count; +-}; +-C_ASSERT(sizeof(struct semaphore) == 8); +- +-struct mutex +-{ +- DWORD tid; +- int count; /* recursion count */ +-}; +-C_ASSERT(sizeof(struct mutex) == 8); +- +-struct event +-{ +- int signaled; +- int locked; +-}; +-C_ASSERT(sizeof(struct event) == 8); +- +-struct esync *create_esync( struct object *root, const struct unicode_str *name, +- unsigned int attr, int initval, int max, enum esync_type type, +- const struct security_descriptor *sd ) +-{ +-#ifdef HAVE_SYS_EVENTFD_H +- struct esync *esync; +- +- if ((esync = create_named_object( root, &esync_ops, name, attr, sd ))) +- { +- if (get_error() != STATUS_OBJECT_NAME_EXISTS) +- { +- int flags = EFD_CLOEXEC | EFD_NONBLOCK; +- +- if (type == ESYNC_SEMAPHORE) +- flags |= EFD_SEMAPHORE; +- +- /* initialize it if it didn't already exist */ +- esync->fd = eventfd( initval, flags ); +- if (esync->fd == -1) +- { +- perror( "eventfd" ); +- file_set_error(); +- release_object( esync ); +- return NULL; +- } +- esync->type = type; +- +- /* Use the fd as index, since that'll be unique across all +- * processes, but should hopefully end up also allowing reuse. */ +- esync->shm_idx = esync->fd + 1; /* we keep index 0 reserved */ +- while (esync->shm_idx * 8 >= shm_size) +- { +- /* Better expand the shm section. 
*/ +- shm_size += pagesize; +- if (ftruncate( shm_fd, shm_size ) == -1) +- { +- fprintf( stderr, "esync: couldn't expand %s to size %ld: ", +- shm_name, (long)shm_size ); +- perror( "ftruncate" ); +- } +- } +- +- /* Initialize the shared memory portion. We want to do this on the +- * server side to avoid a potential though unlikely race whereby +- * the same object is opened and used between the time it's created +- * and the time its shared memory portion is initialized. */ +- switch (type) +- { +- case ESYNC_SEMAPHORE: +- { +- struct semaphore *semaphore = get_shm( esync->shm_idx ); +- semaphore->max = max; +- semaphore->count = initval; +- break; +- } +- case ESYNC_AUTO_EVENT: +- case ESYNC_MANUAL_EVENT: +- { +- struct event *event = get_shm( esync->shm_idx ); +- event->signaled = initval ? 1 : 0; +- event->locked = 0; +- break; +- } +- case ESYNC_MUTEX: +- { +- struct mutex *mutex = get_shm( esync->shm_idx ); +- mutex->tid = initval ? 0 : current->id; +- mutex->count = initval ? 0 : 1; +- list_add_tail( &mutex_list, &esync->mutex_entry ); +- break; +- } +- default: +- assert( 0 ); +- } +- } +- else +- { +- /* validate the type */ +- if (!type_matches( type, esync->type )) +- { +- release_object( &esync->obj ); +- set_error( STATUS_OBJECT_TYPE_MISMATCH ); +- return NULL; +- } +- } +- } +- return esync; +-#else +- /* FIXME: Provide a fallback implementation using pipe(). */ +- set_error( STATUS_NOT_IMPLEMENTED ); +- return NULL; +-#endif +-} +- +-/* Create a file descriptor for an existing handle. +- * Caller must close the handle when it's done; it's not linked to an esync +- * server object in any way. */ +-int esync_create_fd( int initval, int flags ) +-{ +-#ifdef HAVE_SYS_EVENTFD_H +- int fd; +- +- fd = eventfd( initval, flags | EFD_CLOEXEC | EFD_NONBLOCK ); +- if (fd == -1) +- perror( "eventfd" ); +- +- return fd; +-#else +- return -1; +-#endif +-} +- +-/* Wake up a specific fd. */ +-void esync_wake_fd( int fd ) +-{ +- static const uint64_t value = 1; +- +- if (write( fd, &value, sizeof(value) ) == -1) +- perror( "esync: write" ); +-} +- +-/* Wake up a server-side esync object. */ +-void esync_wake_up( struct object *obj ) +-{ +- enum esync_type dummy; +- int fd; +- +- if (obj->ops->get_esync_fd) +- { +- fd = obj->ops->get_esync_fd( obj, &dummy ); +- esync_wake_fd( fd ); +- } +-} +- +-void esync_clear( int fd ) +-{ +- uint64_t value; +- +- /* we don't care about the return value */ +- read( fd, &value, sizeof(value) ); +-} +- +-static inline void small_pause(void) +-{ +-#ifdef __i386__ +- __asm__ __volatile__( "rep;nop" : : : "memory" ); +-#else +- __asm__ __volatile__( "" : : : "memory" ); +-#endif +-} +- +-/* Server-side event support. */ +-void esync_set_event( struct esync *esync ) +-{ +- static const uint64_t value = 1; +- struct event *event = get_shm( esync->shm_idx ); +- +- assert( esync->obj.ops == &esync_ops ); +- assert( event != NULL ); +- +- if (debug_level) +- fprintf( stderr, "esync_set_event() fd=%d\n", esync->fd ); +- +- if (esync->type == ESYNC_MANUAL_EVENT) +- { +- /* Acquire the spinlock. */ +- while (__sync_val_compare_and_swap( &event->locked, 0, 1 )) +- small_pause(); +- } +- +- if (!__atomic_exchange_n( &event->signaled, 1, __ATOMIC_SEQ_CST )) +- { +- if (write( esync->fd, &value, sizeof(value) ) == -1) +- perror( "esync: write" ); +- } +- +- if (esync->type == ESYNC_MANUAL_EVENT) +- { +- /* Release the spinlock. 
*/ +- event->locked = 0; +- } +-} +- +-void esync_reset_event( struct esync *esync ) +-{ +- static uint64_t value = 1; +- struct event *event = get_shm( esync->shm_idx ); +- +- assert( esync->obj.ops == &esync_ops ); +- assert( event != NULL ); +- +- if (debug_level) +- fprintf( stderr, "esync_reset_event() fd=%d\n", esync->fd ); +- +- if (esync->type == ESYNC_MANUAL_EVENT) +- { +- /* Acquire the spinlock. */ +- while (__sync_val_compare_and_swap( &event->locked, 0, 1 )) +- small_pause(); +- } +- +- /* Only bother signaling the fd if we weren't already signaled. */ +- if (__atomic_exchange_n( &event->signaled, 0, __ATOMIC_SEQ_CST )) +- { +- /* we don't care about the return value */ +- read( esync->fd, &value, sizeof(value) ); +- } +- +- if (esync->type == ESYNC_MANUAL_EVENT) +- { +- /* Release the spinlock. */ +- event->locked = 0; +- } +-} +- +-void esync_abandon_mutexes( struct thread *thread ) +-{ +- struct esync *esync; +- +- LIST_FOR_EACH_ENTRY( esync, &mutex_list, struct esync, mutex_entry ) +- { +- struct mutex *mutex = get_shm( esync->shm_idx ); +- +- if (mutex->tid == thread->id) +- { +- if (debug_level) +- fprintf( stderr, "esync_abandon_mutexes() fd=%d\n", esync->fd ); +- mutex->tid = ~0; +- mutex->count = 0; +- esync_wake_fd( esync->fd ); +- } +- } +-} +- +-DECL_HANDLER(create_esync) +-{ +- struct esync *esync; +- struct unicode_str name; +- struct object *root; +- const struct security_descriptor *sd; +- const struct object_attributes *objattr = get_req_object_attributes( &sd, &name, &root ); +- +- if (!do_esync()) +- { +- set_error( STATUS_NOT_IMPLEMENTED ); +- return; +- } +- +- if (!req->type) +- { +- set_error( STATUS_INVALID_PARAMETER ); +- return; +- } +- +- if (!objattr) return; +- +- if ((esync = create_esync( root, &name, objattr->attributes, req->initval, req->max, req->type, sd ))) +- { +- if (get_error() == STATUS_OBJECT_NAME_EXISTS) +- reply->handle = alloc_handle( current->process, esync, req->access, objattr->attributes ); +- else +- reply->handle = alloc_handle_no_access_check( current->process, esync, +- req->access, objattr->attributes ); +- +- reply->type = esync->type; +- reply->shm_idx = esync->shm_idx; +- send_client_fd( current->process, esync->fd, reply->handle ); +- release_object( esync ); +- } +- +- if (root) release_object( root ); +-} +- +-DECL_HANDLER(open_esync) +-{ +- struct unicode_str name = get_req_unicode_str(); +- +- reply->handle = open_object( current->process, req->rootdir, req->access, +- &esync_ops, &name, req->attributes ); +- +- /* send over the fd */ +- if (reply->handle) +- { +- struct esync *esync; +- +- if (!(esync = (struct esync *)get_handle_obj( current->process, reply->handle, +- 0, &esync_ops ))) +- return; +- +- if (!type_matches( req->type, esync->type )) +- { +- set_error( STATUS_OBJECT_TYPE_MISMATCH ); +- release_object( esync ); +- return; +- } +- +- reply->type = esync->type; +- reply->shm_idx = esync->shm_idx; +- +- send_client_fd( current->process, esync->fd, reply->handle ); +- release_object( esync ); +- } +-} +- +-/* Retrieve a file descriptor for an esync object which will be signaled by the +- * server. The client should only read from (i.e. wait on) this object. 
*/ +-DECL_HANDLER(get_esync_fd) +-{ +- struct object *obj; +- enum esync_type type; +- int fd; +- +- if (!(obj = get_handle_obj( current->process, req->handle, SYNCHRONIZE, NULL ))) +- return; +- +- if (obj->ops->get_esync_fd) +- { +- fd = obj->ops->get_esync_fd( obj, &type ); +- reply->type = type; +- if (obj->ops == &esync_ops) +- { +- struct esync *esync = (struct esync *)obj; +- reply->shm_idx = esync->shm_idx; +- } +- else +- reply->shm_idx = 0; +- send_client_fd( current->process, fd, req->handle ); +- } +- else +- { +- if (debug_level) +- { +- fprintf( stderr, "%04x: esync: can't wait on object: ", current->id ); +- obj->ops->dump( obj, 0 ); +- } +- set_error( STATUS_NOT_IMPLEMENTED ); +- } +- +- release_object( obj ); +-} +- +-/* Return the fd used for waiting on user APCs. */ +-DECL_HANDLER(get_esync_apc_fd) +-{ +- send_client_fd( current->process, current->esync_apc_fd, current->id ); +-} +diff --git a/server/esync.h b/server/esync.h +deleted file mode 100644 +index d39f4ef..0000000 +--- a/server/esync.h ++++ /dev/null +@@ -1,35 +0,0 @@ +-/* +- * eventfd-based synchronization objects +- * +- * Copyright (C) 2018 Zebediah Figura +- * +- * This library is free software; you can redistribute it and/or +- * modify it under the terms of the GNU Lesser General Public +- * License as published by the Free Software Foundation; either +- * version 2.1 of the License, or (at your option) any later version. +- * +- * This library is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * Lesser General Public License for more details. +- * +- * You should have received a copy of the GNU Lesser General Public +- * License along with this library; if not, write to the Free Software +- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA +- */ +- +-#include +- +-extern int do_esync(void); +-void esync_init(void); +-int esync_create_fd( int initval, int flags ); +-void esync_wake_fd( int fd ); +-void esync_wake_up( struct object *obj ); +-void esync_clear( int fd ); +- +-struct esync; +- +-extern const struct object_ops esync_ops; +-void esync_set_event( struct esync *esync ); +-void esync_reset_event( struct esync *esync ); +-void esync_abandon_mutexes( struct thread *thread ); +diff --git a/server/event.c b/server/event.c +index f4ca3e4..b750a22 100644 +--- a/server/event.c ++++ b/server/event.c +@@ -35,7 +35,6 @@ + #include "thread.h" + #include "request.h" + #include "security.h" +-#include "esync.h" + + static const WCHAR event_name[] = {'E','v','e','n','t'}; + +@@ -57,15 +56,15 @@ struct event + struct list kernel_object; /* list of kernel object pointers */ + int manual_reset; /* is it a manual reset event? 
*/ + int signaled; /* event has been signaled */ +- int esync_fd; /* esync file descriptor */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + static void event_dump( struct object *obj, int verbose ); + static int event_signaled( struct object *obj, struct wait_queue_entry *entry ); + static void event_satisfied( struct object *obj, struct wait_queue_entry *entry ); +-static int event_get_esync_fd( struct object *obj, enum esync_type *type ); + static int event_signal( struct object *obj, unsigned int access); + static struct list *event_get_kernel_obj_list( struct object *obj ); ++static struct fast_sync *event_get_fast_sync( struct object *obj ); + static void event_destroy( struct object *obj ); + + static const struct object_ops event_ops = +@@ -76,7 +75,6 @@ static const struct object_ops event_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + event_signaled, /* signaled */ +- event_get_esync_fd, /* get_esync_fd */ + event_satisfied, /* satisfied */ + event_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -89,6 +87,7 @@ static const struct object_ops event_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + event_get_kernel_obj_list, /* get_kernel_obj_list */ ++ event_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + event_destroy /* destroy */ + }; +@@ -111,10 +110,13 @@ struct type_descr keyed_event_type = + struct keyed_event + { + struct object obj; /* object header */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + static void keyed_event_dump( struct object *obj, int verbose ); + static int keyed_event_signaled( struct object *obj, struct wait_queue_entry *entry ); ++static struct fast_sync *keyed_event_get_fast_sync( struct object *obj ); ++static void keyed_event_destroy( struct object *obj ); + + static const struct object_ops keyed_event_ops = + { +@@ -124,7 +126,6 @@ static const struct object_ops keyed_event_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + keyed_event_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -137,8 +138,9 @@ static const struct object_ops keyed_event_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ keyed_event_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ +- no_destroy /* destroy */ ++ keyed_event_destroy /* destroy */ + }; + + +@@ -156,9 +158,7 @@ struct event *create_event( struct object *root, const struct unicode_str *name, + list_init( &event->kernel_object ); + event->manual_reset = manual_reset; + event->signaled = initial_state; +- +- if (do_esync()) +- event->esync_fd = esync_create_fd( initial_state, 0 ); ++ event->fast_sync = NULL; + } + } + return event; +@@ -166,10 +166,6 @@ struct event *create_event( struct object *root, const struct unicode_str *name, + + struct event *get_event_obj( struct process *process, obj_handle_t handle, unsigned int access ) + { +- struct object *obj; +- if (do_esync() && (obj = get_handle_obj( process, handle, access, &esync_ops))) +- return (struct event *)obj; /* even though it's not an event */ +- + return (struct event *)get_handle_obj( process, handle, access, &event_ops ); + } + +@@ -183,28 +179,16 @@ static void pulse_event( struct event *event ) + + void set_event( struct event *event ) + { +- if (do_esync() && event->obj.ops == &esync_ops) +- { +- 
esync_set_event( (struct esync *)event ); +- return; +- } +- + event->signaled = 1; + /* wake up all waiters if manual reset, a single one otherwise */ + wake_up( &event->obj, !event->manual_reset ); ++ fast_set_event( event->fast_sync ); + } + + void reset_event( struct event *event ) + { +- if (do_esync() && event->obj.ops == &esync_ops) +- { +- esync_reset_event( (struct esync *)event ); +- return; +- } + event->signaled = 0; +- +- if (do_esync()) +- esync_clear( event->esync_fd ); ++ fast_reset_event( event->fast_sync ); + } + + static void event_dump( struct object *obj, int verbose ) +@@ -222,13 +206,6 @@ static int event_signaled( struct object *obj, struct wait_queue_entry *entry ) + return event->signaled; + } + +-static int event_get_esync_fd( struct object *obj, enum esync_type *type ) +-{ +- struct event *event = (struct event *)obj; +- *type = event->manual_reset ? ESYNC_MANUAL_SERVER : ESYNC_AUTO_SERVER; +- return event->esync_fd; +-} +- + static void event_satisfied( struct object *obj, struct wait_queue_entry *entry ) + { + struct event *event = (struct event *)obj; +@@ -257,12 +234,24 @@ static struct list *event_get_kernel_obj_list( struct object *obj ) + return &event->kernel_object; + } + ++static struct fast_sync *event_get_fast_sync( struct object *obj ) ++{ ++ struct event *event = (struct event *)obj; ++ ++ if (!event->fast_sync) ++ { ++ enum fast_sync_type type = event->manual_reset ? FAST_SYNC_MANUAL_EVENT : FAST_SYNC_AUTO_EVENT; ++ event->fast_sync = fast_create_event( type, event->signaled ); ++ } ++ if (event->fast_sync) grab_object( event->fast_sync ); ++ return event->fast_sync; ++} ++ + static void event_destroy( struct object *obj ) + { + struct event *event = (struct event *)obj; + +- if (do_esync()) +- close( event->esync_fd ); ++ if (event->fast_sync) release_object( event->fast_sync ); + } + + struct keyed_event *create_keyed_event( struct object *root, const struct unicode_str *name, +@@ -275,6 +264,7 @@ struct keyed_event *create_keyed_event( struct object *root, const struct unicod + if (get_error() != STATUS_OBJECT_NAME_EXISTS) + { + /* initialize it if it didn't already exist */ ++ event->fast_sync = NULL; + } + } + return event; +@@ -318,6 +308,23 @@ static int keyed_event_signaled( struct object *obj, struct wait_queue_entry *en + return 0; + } + ++static struct fast_sync *keyed_event_get_fast_sync( struct object *obj ) ++{ ++ struct keyed_event *event = (struct keyed_event *)obj; ++ ++ if (!event->fast_sync) ++ event->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, 1 ); ++ if (event->fast_sync) grab_object( event->fast_sync ); ++ return event->fast_sync; ++} ++ ++static void keyed_event_destroy( struct object *obj ) ++{ ++ struct keyed_event *event = (struct keyed_event *)obj; ++ ++ if (event->fast_sync) release_object( event->fast_sync ); ++} ++ + /* create an event */ + DECL_HANDLER(create_event) + { +diff --git a/server/fast_sync.c b/server/fast_sync.c +new file mode 100644 +index 0000000..fed6eb9 +--- /dev/null ++++ b/server/fast_sync.c +@@ -0,0 +1,434 @@ ++/* ++ * Fast synchronization primitives ++ * ++ * Copyright (C) 2021-2022 Elizabeth Figura for CodeWeavers ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. 
++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA ++ */ ++ ++#include "config.h" ++ ++#include ++#include ++#include ++#include ++ ++#include "ntstatus.h" ++#define WIN32_NO_STATUS ++#include "winternl.h" ++ ++#include "file.h" ++#include "handle.h" ++#include "request.h" ++#include "thread.h" ++ ++#ifdef HAVE_LINUX_NTSYNC_H ++ ++#include ++#include ++#include ++#include ++#include ++ ++struct linux_device ++{ ++ struct object obj; /* object header */ ++ struct fd *fd; /* fd for unix fd */ ++}; ++ ++static struct linux_device *linux_device_object; ++ ++static void linux_device_dump( struct object *obj, int verbose ); ++static struct fd *linux_device_get_fd( struct object *obj ); ++static void linux_device_destroy( struct object *obj ); ++static enum server_fd_type fast_sync_get_fd_type( struct fd *fd ); ++ ++static const struct object_ops linux_device_ops = ++{ ++ sizeof(struct linux_device), /* size */ ++ &no_type, /* type */ ++ linux_device_dump, /* dump */ ++ no_add_queue, /* add_queue */ ++ NULL, /* remove_queue */ ++ NULL, /* signaled */ ++ NULL, /* satisfied */ ++ no_signal, /* signal */ ++ linux_device_get_fd, /* get_fd */ ++ default_map_access, /* map_access */ ++ default_get_sd, /* get_sd */ ++ default_set_sd, /* set_sd */ ++ no_get_full_name, /* get_full_name */ ++ no_lookup_name, /* lookup_name */ ++ no_link_name, /* link_name */ ++ NULL, /* unlink_name */ ++ no_open_file, /* open_file */ ++ no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ ++ no_close_handle, /* close_handle */ ++ linux_device_destroy /* destroy */ ++}; ++ ++static const struct fd_ops fast_sync_fd_ops = ++{ ++ default_fd_get_poll_events, /* get_poll_events */ ++ default_poll_event, /* poll_event */ ++ fast_sync_get_fd_type, /* get_fd_type */ ++ no_fd_read, /* read */ ++ no_fd_write, /* write */ ++ no_fd_flush, /* flush */ ++ no_fd_get_file_info, /* get_file_info */ ++ no_fd_get_volume_info, /* get_volume_info */ ++ no_fd_ioctl, /* ioctl */ ++ default_fd_cancel_async, /* cancel_async */ ++ no_fd_queue_async, /* queue_async */ ++ default_fd_reselect_async /* reselect_async */ ++}; ++ ++static void linux_device_dump( struct object *obj, int verbose ) ++{ ++ struct linux_device *device = (struct linux_device *)obj; ++ assert( obj->ops == &linux_device_ops ); ++ fprintf( stderr, "Fast synchronization device fd=%p\n", device->fd ); ++} ++ ++static struct fd *linux_device_get_fd( struct object *obj ) ++{ ++ struct linux_device *device = (struct linux_device *)obj; ++ return (struct fd *)grab_object( device->fd ); ++} ++ ++static void linux_device_destroy( struct object *obj ) ++{ ++ struct linux_device *device = (struct linux_device *)obj; ++ assert( obj->ops == &linux_device_ops ); ++ if (device->fd) release_object( device->fd ); ++ linux_device_object = NULL; ++} ++ ++static enum server_fd_type fast_sync_get_fd_type( struct fd *fd ) ++{ ++ return FD_TYPE_FILE; ++} ++ ++static struct linux_device *get_linux_device(void) ++{ ++ struct linux_device *device; ++ static int initialized; ++ int unix_fd; ++ ++ if (initialized) ++ { ++ if 
(linux_device_object) ++ grab_object( linux_device_object ); ++ else ++ set_error( STATUS_NOT_IMPLEMENTED ); ++ return linux_device_object; ++ } ++ ++ if (getenv( "WINE_DISABLE_FAST_SYNC" ) && atoi( getenv( "WINE_DISABLE_FAST_SYNC" ) )) ++ { ++ static int once; ++ set_error( STATUS_NOT_IMPLEMENTED ); ++ if (!once++) fprintf(stderr, "ntsync is explicitly disabled.\n"); ++ initialized = 1; ++ return NULL; ++ } ++ ++ unix_fd = open( "/dev/ntsync", O_CLOEXEC | O_RDONLY ); ++ if (unix_fd == -1) ++ { ++ static int once; ++ file_set_error(); ++ if (!once++) fprintf(stderr, "Cannot open /dev/ntsync: %s\n", strerror(errno)); ++ initialized = 1; ++ return NULL; ++ } ++ ++ if (!(device = alloc_object( &linux_device_ops ))) ++ { ++ close( unix_fd ); ++ set_error( STATUS_NO_MEMORY ); ++ initialized = 1; ++ return NULL; ++ } ++ ++ if (!(device->fd = create_anonymous_fd( &fast_sync_fd_ops, unix_fd, &device->obj, 0 ))) ++ { ++ release_object( device ); ++ initialized = 1; ++ return NULL; ++ } ++ ++ fprintf( stderr, "wine: using fast synchronization.\n" ); ++ linux_device_object = device; ++ initialized = 1; ++ return device; ++} ++ ++struct fast_sync ++{ ++ struct object obj; ++ enum fast_sync_type type; ++ struct fd *fd; ++}; ++ ++static void linux_obj_dump( struct object *obj, int verbose ); ++static void linux_obj_destroy( struct object *obj ); ++static struct fd *linux_obj_get_fd( struct object *obj ); ++ ++static const struct object_ops linux_obj_ops = ++{ ++ sizeof(struct fast_sync), /* size */ ++ &no_type, /* type */ ++ linux_obj_dump, /* dump */ ++ no_add_queue, /* add_queue */ ++ NULL, /* remove_queue */ ++ NULL, /* signaled */ ++ NULL, /* satisfied */ ++ no_signal, /* signal */ ++ linux_obj_get_fd, /* get_fd */ ++ default_map_access, /* map_access */ ++ default_get_sd, /* get_sd */ ++ default_set_sd, /* set_sd */ ++ no_get_full_name, /* get_full_name */ ++ no_lookup_name, /* lookup_name */ ++ no_link_name, /* link_name */ ++ NULL, /* unlink_name */ ++ no_open_file, /* open_file */ ++ no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ ++ no_close_handle, /* close_handle */ ++ linux_obj_destroy /* destroy */ ++}; ++ ++static void linux_obj_dump( struct object *obj, int verbose ) ++{ ++ struct fast_sync *fast_sync = (struct fast_sync *)obj; ++ assert( obj->ops == &linux_obj_ops ); ++ fprintf( stderr, "Fast synchronization object type=%u fd=%p\n", fast_sync->type, fast_sync->fd ); ++} ++ ++static void linux_obj_destroy( struct object *obj ) ++{ ++ struct fast_sync *fast_sync = (struct fast_sync *)obj; ++ assert( obj->ops == &linux_obj_ops ); ++ if (fast_sync->fd) release_object( fast_sync->fd ); ++} ++ ++static struct fd *linux_obj_get_fd( struct object *obj ) ++{ ++ struct fast_sync *fast_sync = (struct fast_sync *)obj; ++ assert( obj->ops == &linux_obj_ops ); ++ return (struct fd *)grab_object( fast_sync->fd ); ++} ++ ++static struct fast_sync *create_fast_sync( enum fast_sync_type type, int unix_fd ) ++{ ++ struct fast_sync *fast_sync; ++ ++ if (!(fast_sync = alloc_object( &linux_obj_ops ))) ++ { ++ close( unix_fd ); ++ return NULL; ++ } ++ ++ fast_sync->type = type; ++ ++ if (!(fast_sync->fd = create_anonymous_fd( &fast_sync_fd_ops, unix_fd, &fast_sync->obj, 0 ))) ++ { ++ release_object( fast_sync ); ++ return NULL; ++ } ++ ++ return fast_sync; ++} ++ ++struct fast_sync *fast_create_event( enum fast_sync_type type, int signaled ) ++{ ++ struct ntsync_event_args args = {0}; ++ struct linux_device *device; ++ ++ if (!(device = get_linux_device())) return NULL; 
++
++ args.signaled = signaled;
++ switch (type)
++ {
++ case FAST_SYNC_AUTO_EVENT:
++ case FAST_SYNC_AUTO_SERVER:
++ args.manual = 0;
++ break;
++
++ case FAST_SYNC_MANUAL_EVENT:
++ case FAST_SYNC_MANUAL_SERVER:
++ case FAST_SYNC_QUEUE:
++ args.manual = 1;
++ break;
++
++ case FAST_SYNC_MUTEX:
++ case FAST_SYNC_SEMAPHORE:
++ assert(0);
++ break;
++ }
++ if (ioctl( get_unix_fd( device->fd ), NTSYNC_IOC_CREATE_EVENT, &args ) < 0)
++ {
++ file_set_error();
++ release_object( device );
++ return NULL;
++ }
++ release_object( device );
++
++ return create_fast_sync( type, args.event );
++}
++
++struct fast_sync *fast_create_semaphore( unsigned int count, unsigned int max )
++{
++ struct ntsync_sem_args args = {0};
++ struct linux_device *device;
++
++ if (!(device = get_linux_device())) return NULL;
++
++ args.count = count;
++ args.max = max;
++ if (ioctl( get_unix_fd( device->fd ), NTSYNC_IOC_CREATE_SEM, &args ) < 0)
++ {
++ file_set_error();
++ release_object( device );
++ return NULL;
++ }
++
++ release_object( device );
++
++ return create_fast_sync( FAST_SYNC_SEMAPHORE, args.sem );
++}
++
++struct fast_sync *fast_create_mutex( thread_id_t owner, unsigned int count )
++{
++ struct ntsync_mutex_args args = {0};
++ struct linux_device *device;
++
++ if (!(device = get_linux_device())) return NULL;
++
++ args.owner = owner;
++ args.count = count;
++ if (ioctl( get_unix_fd( device->fd ), NTSYNC_IOC_CREATE_MUTEX, &args ) < 0)
++ {
++ file_set_error();
++ release_object( device );
++ return NULL;
++ }
++
++ release_object( device );
++
++ return create_fast_sync( FAST_SYNC_MUTEX, args.mutex );
++}
++
++void fast_set_event( struct fast_sync *fast_sync )
++{
++ __u32 count;
++
++ if (!fast_sync) return;
++
++ if (debug_level) fprintf( stderr, "fast_set_event %p\n", fast_sync->fd );
++
++ ioctl( get_unix_fd( fast_sync->fd ), NTSYNC_IOC_EVENT_SET, &count );
++}
++
++void fast_reset_event( struct fast_sync *fast_sync )
++{
++ __u32 count;
++
++ if (!fast_sync) return;
++
++ if (debug_level) fprintf( stderr, "fast_reset_event %p\n", fast_sync->fd );
++
++ ioctl( get_unix_fd( fast_sync->fd ), NTSYNC_IOC_EVENT_RESET, &count );
++}
++
++void fast_abandon_mutex( thread_id_t tid, struct fast_sync *fast_sync )
++{
++ ioctl( get_unix_fd( fast_sync->fd ), NTSYNC_IOC_MUTEX_KILL, &tid );
++}
++
++#else
++
++struct fast_sync *fast_create_event( enum fast_sync_type type, int signaled )
++{
++ set_error( STATUS_NOT_IMPLEMENTED );
++ return NULL;
++}
++
++struct fast_sync *fast_create_semaphore( unsigned int count, unsigned int max )
++{
++ set_error( STATUS_NOT_IMPLEMENTED );
++ return NULL;
++}
++
++struct fast_sync *fast_create_mutex( thread_id_t owner, unsigned int count )
++{
++ set_error( STATUS_NOT_IMPLEMENTED );
++ return NULL;
++}
++
++void fast_set_event( struct fast_sync *fast_sync )
++{
++}
++
++void fast_reset_event( struct fast_sync *obj )
++{
++}
++
++void fast_abandon_mutex( thread_id_t tid, struct fast_sync *fast_sync )
++{
++}
++
++#endif
++
++DECL_HANDLER(get_linux_sync_device)
++{
++#ifdef HAVE_LINUX_NTSYNC_H
++ struct linux_device *device;
++
++ if ((device = get_linux_device()))
++ {
++ reply->handle = alloc_handle( current->process, device, 0, 0 );
++ release_object( device );
++ }
++#else
++ set_error( STATUS_NOT_IMPLEMENTED );
++#endif
++}
++
++DECL_HANDLER(get_linux_sync_obj)
++{
++#ifdef HAVE_LINUX_NTSYNC_H
++ struct object *obj;
++
++ if ((obj = get_handle_obj( current->process, req->handle, 0, NULL )))
++ {
++ struct fast_sync *fast_sync;
++
++ if ((fast_sync = obj->ops->get_fast_sync( 
obj ))) ++ { ++ reply->handle = alloc_handle( current->process, fast_sync, 0, 0 ); ++ reply->type = fast_sync->type; ++ reply->access = get_handle_access( current->process, req->handle ); ++ release_object( fast_sync ); ++ } ++ release_object( obj ); ++ } ++#else ++ set_error( STATUS_NOT_IMPLEMENTED ); ++#endif ++} +diff --git a/server/fd.c b/server/fd.c +index a6782b0..f00e22d 100644 +--- a/server/fd.c ++++ b/server/fd.c +@@ -97,7 +97,6 @@ + #include "handle.h" + #include "process.h" + #include "request.h" +-#include "esync.h" + + #include "winternl.h" + #include "winioctl.h" +@@ -162,7 +161,7 @@ struct fd + struct completion *completion; /* completion object attached to this fd */ + apc_param_t comp_key; /* completion key to set in completion events */ + unsigned int comp_flags; /* completion flags */ +- int esync_fd; /* esync file descriptor */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + static void fd_dump( struct object *obj, int verbose ); +@@ -176,7 +175,6 @@ static const struct object_ops fd_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -189,6 +187,7 @@ static const struct object_ops fd_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + fd_destroy /* destroy */ + }; +@@ -218,7 +217,6 @@ static const struct object_ops device_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -231,6 +229,7 @@ static const struct object_ops device_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + device_destroy /* destroy */ + }; +@@ -259,7 +258,6 @@ static const struct object_ops inode_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -272,6 +270,7 @@ static const struct object_ops inode_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + inode_destroy /* destroy */ + }; +@@ -302,7 +301,6 @@ static const struct object_ops file_lock_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + file_lock_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -315,6 +313,7 @@ static const struct object_ops file_lock_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + no_destroy /* destroy */ + }; +@@ -1651,9 +1650,7 @@ static void fd_destroy( struct object *obj ) + if (fd->unix_fd != -1) close( fd->unix_fd ); + free( fd->unix_name ); + } +- +- if (do_esync()) +- close( fd->esync_fd ); ++ if (fd->fast_sync) release_object( fd->fast_sync ); + } + + /* check if the desired access is possible without violating */ +@@ -1772,16 +1769,13 @@ static struct fd *alloc_fd_object(void) + fd->poll_index 
= -1; + fd->completion = NULL; + fd->comp_flags = 0; +- fd->esync_fd = -1; ++ fd->fast_sync = NULL; + init_async_queue( &fd->read_q ); + init_async_queue( &fd->write_q ); + init_async_queue( &fd->wait_q ); + list_init( &fd->inode_entry ); + list_init( &fd->locks ); + +- if (do_esync()) +- fd->esync_fd = esync_create_fd( 1, 0 ); +- + if ((fd->poll_index = add_poll_user( fd )) == -1) + { + release_object( fd ); +@@ -1816,16 +1810,13 @@ struct fd *alloc_pseudo_fd( const struct fd_ops *fd_user_ops, struct object *use + fd->poll_index = -1; + fd->completion = NULL; + fd->comp_flags = 0; ++ fd->fast_sync = NULL; + fd->no_fd_status = STATUS_BAD_DEVICE_TYPE; +- fd->esync_fd = -1; + init_async_queue( &fd->read_q ); + init_async_queue( &fd->write_q ); + init_async_queue( &fd->wait_q ); + list_init( &fd->inode_entry ); + list_init( &fd->locks ); +- +- if (do_esync()) +- fd->esync_fd = esync_create_fd( 0, 0 ); + return fd; + } + +@@ -2282,10 +2273,15 @@ void set_fd_signaled( struct fd *fd, int signaled ) + { + if (fd->comp_flags & FILE_SKIP_SET_EVENT_ON_HANDLE) return; + fd->signaled = signaled; +- if (signaled) wake_up( fd->user, 0 ); +- +- if (do_esync() && !signaled) +- esync_clear( fd->esync_fd ); ++ if (signaled) ++ { ++ wake_up( fd->user, 0 ); ++ fast_set_event( fd->fast_sync ); ++ } ++ else ++ { ++ fast_reset_event( fd->fast_sync ); ++ } + } + + /* check if events are pending and if yes return which one(s) */ +@@ -2311,12 +2307,16 @@ int default_fd_signaled( struct object *obj, struct wait_queue_entry *entry ) + return ret; + } + +-int default_fd_get_esync_fd( struct object *obj, enum esync_type *type ) ++struct fast_sync *default_fd_get_fast_sync( struct object *obj ) + { + struct fd *fd = get_obj_fd( obj ); +- int ret = fd->esync_fd; +- *type = ESYNC_MANUAL_SERVER; ++ struct fast_sync *ret; ++ ++ if (!fd->fast_sync) ++ fd->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, fd->signaled ); ++ ret = fd->fast_sync; + release_object( fd ); ++ if (ret) grab_object( ret ); + return ret; + } + +diff --git a/server/file.c b/server/file.c +index 828a21c..c6bdbef 100644 +--- a/server/file.c ++++ b/server/file.c +@@ -123,7 +123,6 @@ static const struct object_ops file_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + file_get_fd, /* get_fd */ +@@ -136,6 +135,7 @@ static const struct object_ops file_ops = + NULL, /* unlink_name */ + file_open_file, /* open_file */ + file_get_kernel_obj_list, /* get_kernel_obj_list */ ++ default_fd_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + file_destroy /* destroy */ + }; +diff --git a/server/file.h b/server/file.h +index abad355..6b4fa51 100644 +--- a/server/file.h ++++ b/server/file.h +@@ -108,7 +108,7 @@ extern char *dup_fd_name( struct fd *root, const char *name ) __WINE_DEALLOC(fre + extern void get_nt_name( struct fd *fd, struct unicode_str *name ); + + extern int default_fd_signaled( struct object *obj, struct wait_queue_entry *entry ); +-extern int default_fd_get_esync_fd( struct object *obj, enum esync_type *type ); ++extern struct fast_sync *default_fd_get_fast_sync( struct object *obj ); + extern int default_fd_get_poll_events( struct fd *fd ); + extern void default_poll_event( struct fd *fd, int event ); + extern void fd_cancel_async( struct fd *fd, struct async *async ); +diff --git a/server/handle.c b/server/handle.c +index cf6afe4..e07f32c 100644 +--- a/server/handle.c ++++ 
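After this change set_fd_signaled keeps the fd's fast_sync event in lockstep with the server-side signaled flag, and the calls are made even before any fast_sync has been created, which implies fast_set_event and fast_reset_event tolerate a NULL object. A small self-contained model of that mirroring, under exactly that assumption (struct event here is a plain flag, not the real fast_sync):

#include <stdio.h>

/* hypothetical manual-reset event; in the patch this is a struct fast_sync
 * backed by the kernel, here it is just a flag */
struct event
{
    int signaled;
};

/* NULL-tolerant helpers, mirroring how fast_set_event()/fast_reset_event()
 * appear to be called on a possibly-NULL fd->fast_sync */
static void event_set( struct event *e )   { if (e) e->signaled = 1; }
static void event_reset( struct event *e ) { if (e) e->signaled = 0; }

struct fd_state
{
    int signaled;            /* authoritative server-side state */
    struct event *mirror;    /* lazily created client-visible mirror */
};

/* keep the mirror in lockstep with the authoritative flag, as
 * set_fd_signaled() does after the patch */
static void fd_set_signaled( struct fd_state *fd, int signaled )
{
    fd->signaled = signaled;
    if (signaled) event_set( fd->mirror );
    else          event_reset( fd->mirror );
}

int main(void)
{
    struct event mirror = { 0 };
    struct fd_state fd = { 0, &mirror };
    fd_set_signaled( &fd, 1 );
    printf( "mirror signaled: %d\n", mirror.signaled );
    fd_set_signaled( &fd, 0 );
    printf( "mirror signaled: %d\n", mirror.signaled );
    return 0;
}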
b/server/handle.c +@@ -126,7 +126,6 @@ static const struct object_ops handle_table_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -139,6 +138,7 @@ static const struct object_ops handle_table_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + handle_table_destroy /* destroy */ + }; +diff --git a/server/hook.c b/server/hook.c +index da351d6..5a00699 100644 +--- a/server/hook.c ++++ b/server/hook.c +@@ -80,7 +80,6 @@ static const struct object_ops hook_table_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -93,6 +92,7 @@ static const struct object_ops hook_table_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + hook_table_destroy /* destroy */ + }; +diff --git a/server/mailslot.c b/server/mailslot.c +index 4cf9b73..d9807b4 100644 +--- a/server/mailslot.c ++++ b/server/mailslot.c +@@ -74,7 +74,6 @@ static const struct object_ops mailslot_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + mailslot_get_fd, /* get_fd */ +@@ -87,6 +86,7 @@ static const struct object_ops mailslot_ops = + default_unlink_name, /* unlink_name */ + mailslot_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ default_fd_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + mailslot_destroy /* destroy */ + }; +@@ -134,7 +134,6 @@ static const struct object_ops mail_writer_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + mail_writer_get_fd, /* get_fd */ +@@ -147,6 +146,7 @@ static const struct object_ops mail_writer_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + mail_writer_destroy /* destroy */ + }; +@@ -198,7 +198,6 @@ static const struct object_ops mailslot_device_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -211,6 +210,7 @@ static const struct object_ops mailslot_device_ops = + default_unlink_name, /* unlink_name */ + mailslot_device_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + mailslot_device_destroy /* destroy */ + }; +@@ -229,7 +229,6 @@ static const struct object_ops mailslot_device_file_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + mailslot_device_file_get_fd, /* get_fd */ +@@ -242,6 +241,7 @@ static const struct object_ops mailslot_device_file_ops = + NULL, /* unlink_name */ + 
no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ default_fd_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + mailslot_device_file_destroy /* destroy */ + }; +diff --git a/server/main.c b/server/main.c +index 5062d09..ddda5f4 100644 +--- a/server/main.c ++++ b/server/main.c +@@ -34,7 +34,6 @@ + #include "thread.h" + #include "request.h" + #include "unicode.h" +-#include "esync.h" + + /* command-line options */ + int debug_level = 0; +@@ -230,9 +229,6 @@ int main( int argc, char *argv[] ) + sock_init(); + open_master_socket(); + +- if (do_esync()) +- esync_init(); +- + if (debug_level) fprintf( stderr, "wineserver: starting (pid=%ld)\n", (long) getpid() ); + set_current_time(); + init_scheduler(); +diff --git a/server/mapping.c b/server/mapping.c +index 88de806..c3cba90 100644 +--- a/server/mapping.c ++++ b/server/mapping.c +@@ -67,7 +67,6 @@ static const struct object_ops ranges_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -80,6 +79,7 @@ static const struct object_ops ranges_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + ranges_destroy /* destroy */ + }; +@@ -104,7 +104,6 @@ static const struct object_ops shared_map_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -117,6 +116,7 @@ static const struct object_ops shared_map_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + shared_map_destroy /* destroy */ + }; +@@ -178,7 +178,6 @@ static const struct object_ops mapping_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + mapping_get_fd, /* get_fd */ +@@ -191,6 +190,7 @@ static const struct object_ops mapping_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + mapping_destroy /* destroy */ + }; +diff --git a/server/mutex.c b/server/mutex.c +index 4785a83..167c236 100644 +--- a/server/mutex.c ++++ b/server/mutex.c +@@ -38,6 +38,8 @@ + + static const WCHAR mutex_name[] = {'M','u','t','a','n','t'}; + ++static struct list fast_mutexes = LIST_INIT(fast_mutexes); ++ + struct type_descr mutex_type = + { + { mutex_name, sizeof(mutex_name) }, /* name */ +@@ -57,6 +59,8 @@ struct mutex + unsigned int count; /* recursion count */ + int abandoned; /* has it been abandoned? 
*/ + struct list entry; /* entry in owner thread mutex list */ ++ struct list fast_mutexes_entry; /* entry in fast_mutexes list */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + static void mutex_dump( struct object *obj, int verbose ); +@@ -64,6 +68,7 @@ static int mutex_signaled( struct object *obj, struct wait_queue_entry *entry ); + static void mutex_satisfied( struct object *obj, struct wait_queue_entry *entry ); + static void mutex_destroy( struct object *obj ); + static int mutex_signal( struct object *obj, unsigned int access ); ++static struct fast_sync *mutex_get_fast_sync( struct object *obj ); + + static const struct object_ops mutex_ops = + { +@@ -73,7 +78,6 @@ static const struct object_ops mutex_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + mutex_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + mutex_satisfied, /* satisfied */ + mutex_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -86,6 +90,7 @@ static const struct object_ops mutex_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ mutex_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + mutex_destroy /* destroy */ + }; +@@ -128,6 +133,7 @@ static struct mutex *create_mutex( struct object *root, const struct unicode_str + mutex->owner = NULL; + mutex->abandoned = 0; + if (owned) do_grab( mutex, current ); ++ mutex->fast_sync = NULL; + } + } + return mutex; +@@ -135,16 +141,22 @@ static struct mutex *create_mutex( struct object *root, const struct unicode_str + + void abandon_mutexes( struct thread *thread ) + { ++ struct mutex *mutex; + struct list *ptr; + + while ((ptr = list_head( &thread->mutex_list )) != NULL) + { +- struct mutex *mutex = LIST_ENTRY( ptr, struct mutex, entry ); ++ mutex = LIST_ENTRY( ptr, struct mutex, entry ); + assert( mutex->owner == thread ); + mutex->count = 0; + mutex->abandoned = 1; + do_release( mutex ); + } ++ ++ LIST_FOR_EACH_ENTRY(mutex, &fast_mutexes, struct mutex, fast_mutexes_entry) ++ { ++ fast_abandon_mutex( thread->id, mutex->fast_sync ); ++ } + } + + static void mutex_dump( struct object *obj, int verbose ) +@@ -190,14 +202,34 @@ static int mutex_signal( struct object *obj, unsigned int access ) + return 1; + } + ++static struct fast_sync *mutex_get_fast_sync( struct object *obj ) ++{ ++ struct mutex *mutex = (struct mutex *)obj; ++ ++ if (!mutex->fast_sync) ++ { ++ mutex->fast_sync = fast_create_mutex( mutex->owner ? 
mutex->owner->id : 0, mutex->count ); ++ if (mutex->fast_sync) list_add_tail( &fast_mutexes, &mutex->fast_mutexes_entry ); ++ } ++ if (mutex->fast_sync) grab_object( mutex->fast_sync ); ++ return mutex->fast_sync; ++} ++ + static void mutex_destroy( struct object *obj ) + { + struct mutex *mutex = (struct mutex *)obj; + assert( obj->ops == &mutex_ops ); + +- if (!mutex->count) return; +- mutex->count = 0; +- do_release( mutex ); ++ if (mutex->count) ++ { ++ mutex->count = 0; ++ do_release( mutex ); ++ } ++ if (mutex->fast_sync) ++ { ++ release_object( mutex->fast_sync ); ++ list_remove( &mutex->fast_mutexes_entry ); ++ } + } + + /* create a mutex */ +diff --git a/server/named_pipe.c b/server/named_pipe.c +index a90ec60..6d8cb3e 100644 +--- a/server/named_pipe.c ++++ b/server/named_pipe.c +@@ -119,7 +119,6 @@ static const struct object_ops named_pipe_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -132,6 +131,7 @@ static const struct object_ops named_pipe_ops = + default_unlink_name, /* unlink_name */ + named_pipe_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + named_pipe_destroy /* destroy */ + }; +@@ -168,7 +168,6 @@ static const struct object_ops pipe_server_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ +- default_fd_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + pipe_end_get_fd, /* get_fd */ +@@ -181,6 +180,7 @@ static const struct object_ops pipe_server_ops = + NULL, /* unlink_name */ + pipe_server_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ default_fd_get_fast_sync, /* get_fast_sync */ + async_close_obj_handle, /* close_handle */ + pipe_server_destroy /* destroy */ + }; +@@ -213,7 +213,6 @@ static const struct object_ops pipe_client_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ +- default_fd_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + pipe_end_get_fd, /* get_fd */ +@@ -226,6 +225,7 @@ static const struct object_ops pipe_client_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ default_fd_get_fast_sync, /* get_fast_sync */ + async_close_obj_handle, /* close_handle */ + pipe_end_destroy /* destroy */ + }; +@@ -261,7 +261,6 @@ static const struct object_ops named_pipe_device_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -274,6 +273,7 @@ static const struct object_ops named_pipe_device_ops = + default_unlink_name, /* unlink_name */ + named_pipe_device_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + named_pipe_device_destroy /* destroy */ + }; +@@ -293,7 +293,6 @@ static const struct object_ops named_pipe_device_file_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + named_pipe_device_file_get_fd, /* get_fd */ +@@ -306,6 
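The global fast_mutexes list exists so the thread-exit path can reach every mutex that ever handed out a fast_sync object: each such mutex is linked in by mutex_get_fast_sync, and abandon_mutexes walks the whole list. A simplified standalone model of that walk, using a cut-down version of the server's intrusive list idiom; note that here the ownership test lives in the loop, whereas the patch delegates it to fast_abandon_mutex:

#include <stddef.h>
#include <stdio.h>

/* simplified version of the server's intrusive list idiom (list.h) */
struct list { struct list *next, *prev; };

#define LIST_INIT(l) { &(l), &(l) }

static void list_add_tail( struct list *head, struct list *elem )
{
    elem->prev = head->prev;
    elem->next = head;
    head->prev->next = elem;
    head->prev = elem;
}

#define LIST_ENTRY(elem, type, field) \
    ((type *)((char *)(elem) - offsetof(type, field)))

/* hypothetical mutex record; the real struct mutex carries more state */
struct mutex
{
    unsigned int owner_tid;             /* 0 when unowned */
    int abandoned;
    struct list fast_mutexes_entry;
};

static struct list fast_mutexes = LIST_INIT(fast_mutexes);

/* abandon every listed mutex still owned by the dying thread: the shape
 * of the new loop at the end of abandon_mutexes() */
static void abandon_fast_mutexes( unsigned int tid )
{
    struct list *ptr;
    for (ptr = fast_mutexes.next; ptr != &fast_mutexes; ptr = ptr->next)
    {
        struct mutex *mutex = LIST_ENTRY( ptr, struct mutex, fast_mutexes_entry );
        if (mutex->owner_tid == tid)
        {
            mutex->owner_tid = 0;
            mutex->abandoned = 1;
        }
    }
}

int main(void)
{
    struct mutex m = { 42, 0, { NULL, NULL } };
    list_add_tail( &fast_mutexes, &m.fast_mutexes_entry );
    abandon_fast_mutexes( 42 );
    printf( "abandoned: %d\n", m.abandoned );
    return 0;
}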
+305,7 @@ static const struct object_ops named_pipe_device_file_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ default_fd_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + named_pipe_device_file_destroy /* destroy */ + }; +diff --git a/server/object.c b/server/object.c +index 29f1ea9..33fc18c 100644 +--- a/server/object.c ++++ b/server/object.c +@@ -538,6 +538,12 @@ struct fd *no_get_fd( struct object *obj ) + return NULL; + } + ++struct fast_sync *no_get_fast_sync( struct object *obj ) ++{ ++ set_error( STATUS_OBJECT_TYPE_MISMATCH ); ++ return NULL; ++} ++ + unsigned int default_map_access( struct object *obj, unsigned int access ) + { + return map_access( access, &obj->ops->type->mapping ); +diff --git a/server/object.h b/server/object.h +index 6f4bca8..f028a8e 100644 +--- a/server/object.h ++++ b/server/object.h +@@ -42,6 +42,7 @@ struct async; + struct async_queue; + struct winstation; + struct object_type; ++struct fast_sync; + + + struct unicode_str +@@ -78,8 +79,6 @@ struct object_ops + void (*remove_queue)(struct object *,struct wait_queue_entry *); + /* is object signaled? */ + int (*signaled)(struct object *,struct wait_queue_entry *); +- /* return the esync fd for this object */ +- int (*get_esync_fd)(struct object *, enum esync_type *type); + /* wait satisfied */ + void (*satisfied)(struct object *,struct wait_queue_entry *); + /* signal an object */ +@@ -105,6 +104,8 @@ struct object_ops + unsigned int options); + /* return list of kernel objects */ + struct list *(*get_kernel_obj_list)(struct object *); ++ /* get a client-waitable fast-synchronization handle to this object */ ++ struct fast_sync *(*get_fast_sync)(struct object *); + /* close a handle to this object */ + int (*close_handle)(struct object *,struct process *,obj_handle_t); + /* destroy on refcount == 0 */ +@@ -224,6 +225,17 @@ extern void reset_event( struct event *event ); + + extern void abandon_mutexes( struct thread *thread ); + ++/* fast-synchronization functions */ ++ ++extern struct fast_sync *fast_create_event( enum fast_sync_type type, int signaled ); ++extern struct fast_sync *fast_create_semaphore( unsigned int count, unsigned int max ); ++extern struct fast_sync *fast_create_mutex( thread_id_t owner, unsigned int count ); ++extern void fast_set_event( struct fast_sync *obj ); ++extern void fast_reset_event( struct fast_sync *obj ); ++extern void fast_abandon_mutex( thread_id_t tid, struct fast_sync *fast_sync ); ++ ++extern struct fast_sync *no_get_fast_sync( struct object *obj ); ++ + /* serial functions */ + + int get_serial_async_timeout(struct object *obj, int type, int count); +diff --git a/server/process.c b/server/process.c +index 6d66a7c..34451c1 100644 +--- a/server/process.c ++++ b/server/process.c +@@ -63,7 +63,6 @@ + #include "request.h" + #include "user.h" + #include "security.h" +-#include "esync.h" + + /* process object */ + +@@ -95,8 +94,8 @@ static unsigned int process_map_access( struct object *obj, unsigned int access + static struct security_descriptor *process_get_sd( struct object *obj ); + static void process_poll_event( struct fd *fd, int event ); + static struct list *process_get_kernel_obj_list( struct object *obj ); ++static struct fast_sync *process_get_fast_sync( struct object *obj ); + static void process_destroy( struct object *obj ); +-static int process_get_esync_fd( struct object *obj, enum esync_type *type ); + static void terminate_process( struct process *process, struct 
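no_get_fast_sync is the shared stub wired into every non-waitable object's ops table, in the same way no_get_fd and no_signal already are: it reports STATUS_OBJECT_TYPE_MISMATCH and returns NULL. A compilable miniature of that vtable-with-default-stubs pattern, trimmed down to the one slot of interest:

#include <stdio.h>

struct object;

/* cut-down version of the object_ops idea: every object type fills in
 * every slot, using shared "no_*" stubs where an operation is unsupported */
struct object_ops
{
    const char *name;
    struct object *(*get_fast_sync)( struct object *obj );
};

struct object
{
    const struct object_ops *ops;
};

static unsigned int last_error;
#define STATUS_OBJECT_TYPE_MISMATCH 0xc0000024   /* value from ntstatus.h */

static void set_error( unsigned int err ) { last_error = err; }

/* shared stub, as in server/object.c: signal "not a waitable object" */
static struct object *no_get_fast_sync( struct object *obj )
{
    (void)obj;
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
    return NULL;
}

static const struct object_ops registry_key_ops =
{
    "registry key",
    no_get_fast_sync,    /* get_fast_sync */
};

int main(void)
{
    struct object key = { &registry_key_ops };
    if (!key.ops->get_fast_sync( &key ))
        printf( "%s: error %#x\n", key.ops->name, last_error );
    return 0;
}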
thread *skip, int exit_code ); + + static const struct object_ops process_ops = +@@ -107,7 +106,6 @@ static const struct object_ops process_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + process_signaled, /* signaled */ +- process_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -120,6 +118,7 @@ static const struct object_ops process_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + process_get_kernel_obj_list, /* get_kernel_obj_list */ ++ process_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + process_destroy /* destroy */ + }; +@@ -159,7 +158,6 @@ static const struct object_ops startup_info_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + startup_info_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -172,6 +170,7 @@ static const struct object_ops startup_info_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + startup_info_destroy /* destroy */ + }; +@@ -194,6 +193,7 @@ struct type_descr job_type = + + static void job_dump( struct object *obj, int verbose ); + static int job_signaled( struct object *obj, struct wait_queue_entry *entry ); ++static struct fast_sync *job_get_fast_sync( struct object *obj ); + static int job_close_handle( struct object *obj, struct process *process, obj_handle_t handle ); + static void job_destroy( struct object *obj ); + +@@ -211,6 +211,7 @@ struct job + struct job *parent; + struct list parent_job_entry; /* list entry for parent job */ + struct list child_job_list; /* list of child jobs */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + static const struct object_ops job_ops = +@@ -221,7 +222,6 @@ static const struct object_ops job_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + job_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -234,6 +234,7 @@ static const struct object_ops job_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ job_get_fast_sync, /* get_fast_sync */ + job_close_handle, /* close_handle */ + job_destroy /* destroy */ + }; +@@ -258,6 +259,7 @@ static struct job *create_job_object( struct object *root, const struct unicode_ + job->completion_port = NULL; + job->completion_key = 0; + job->parent = NULL; ++ job->fast_sync = NULL; + } + } + return job; +@@ -414,6 +416,17 @@ static void terminate_job( struct job *job, int exit_code ) + job->terminating = 0; + job->signaled = 1; + wake_up( &job->obj, 0 ); ++ fast_set_event( job->fast_sync ); ++} ++ ++static struct fast_sync *job_get_fast_sync( struct object *obj ) ++{ ++ struct job *job = (struct job *)obj; ++ ++ if (!job->fast_sync) ++ job->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, job->signaled ); ++ if (job->fast_sync) grab_object( job->fast_sync ); ++ return job->fast_sync; + } + + static int job_close_handle( struct object *obj, struct process *process, obj_handle_t handle ) +@@ -444,6 +457,8 @@ static void job_destroy( struct object *obj ) + list_remove( &job->parent_job_entry ); + release_object( job->parent ); + } ++ ++ if (job->fast_sync) release_object( 
job->fast_sync ); + } + + static void job_dump( struct object *obj, int verbose ) +@@ -688,9 +703,9 @@ struct process *create_process( int fd, struct process *parent, unsigned int fla + process->rawinput_device_count = 0; + process->rawinput_mouse = NULL; + process->rawinput_kbd = NULL; ++ process->fast_sync = NULL; + memset( &process->image_info, 0, sizeof(process->image_info) ); + list_init( &process->rawinput_entry ); +- process->esync_fd = -1; + list_init( &process->kernel_object ); + list_init( &process->thread_list ); + list_init( &process->locks ); +@@ -747,9 +762,6 @@ struct process *create_process( int fd, struct process *parent, unsigned int fla + if (!token_assign_label( process->token, &high_label_sid )) + goto error; + +- if (do_esync()) +- process->esync_fd = esync_create_fd( 0, 0 ); +- + set_fd_events( process->msg_fd, POLLIN ); /* start listening to events */ + return process; + +@@ -798,7 +810,8 @@ static void process_destroy( struct object *obj ) + free( process->rawinput_devices ); + free( process->dir_cache ); + free( process->image ); +- if (do_esync()) close( process->esync_fd ); ++ ++ if (process->fast_sync) release_object( process->fast_sync ); + } + + /* dump a process on stdout for debugging purposes */ +@@ -816,13 +829,6 @@ static int process_signaled( struct object *obj, struct wait_queue_entry *entry + return !process->running_threads; + } + +-static int process_get_esync_fd( struct object *obj, enum esync_type *type ) +-{ +- struct process *process = (struct process *)obj; +- *type = ESYNC_MANUAL_SERVER; +- return process->esync_fd; +-} +- + static unsigned int process_map_access( struct object *obj, unsigned int access ) + { + access = default_map_access( obj, access ); +@@ -837,6 +843,16 @@ static struct list *process_get_kernel_obj_list( struct object *obj ) + return &process->kernel_object; + } + ++static struct fast_sync *process_get_fast_sync( struct object *obj ) ++{ ++ struct process *process = (struct process *)obj; ++ ++ if (!process->fast_sync) ++ process->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, !process->running_threads ); ++ if (process->fast_sync) grab_object( process->fast_sync ); ++ return process->fast_sync; ++} ++ + static struct security_descriptor *process_get_sd( struct object *obj ) + { + static struct security_descriptor *process_default_sd; +@@ -1001,6 +1017,7 @@ static void process_killed( struct process *process ) + release_job_process( process ); + start_sigkill_timer( process ); + wake_up( &process->obj, 0 ); ++ fast_set_event( process->fast_sync ); + } + + /* add a thread to a process running threads list */ +diff --git a/server/process.h b/server/process.h +index bedd8bb..2140427 100644 +--- a/server/process.h ++++ b/server/process.h +@@ -86,7 +86,7 @@ struct process + struct list rawinput_entry; /* entry in the rawinput process list */ + struct list kernel_object; /* list of kernel object pointers */ + pe_image_info_t image_info; /* main exe image info */ +- int esync_fd; /* esync file descriptor (signaled on exit) */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + /* process functions */ +diff --git a/server/protocol.def b/server/protocol.def +index 4983691..7bc4208 100644 +--- a/server/protocol.def ++++ b/server/protocol.def +@@ -3887,6 +3887,7 @@ struct handle_info + obj_handle_t handle; /* process handle */ + @END + ++ + /* Iterate thread list for process */ + @REQ(get_next_thread) + obj_handle_t process; /* process handle */ +@@ -3898,63 +3899,60 @@ struct handle_info + obj_handle_t 
handle; /* next thread handle */ + @END + +-enum esync_type ++ ++enum fast_sync_type + { +- ESYNC_SEMAPHORE = 1, +- ESYNC_AUTO_EVENT, +- ESYNC_MANUAL_EVENT, +- ESYNC_MUTEX, +- ESYNC_AUTO_SERVER, +- ESYNC_MANUAL_SERVER, +- ESYNC_QUEUE, ++ FAST_SYNC_SEMAPHORE = 1, ++ FAST_SYNC_MUTEX, ++ FAST_SYNC_AUTO_EVENT, ++ FAST_SYNC_MANUAL_EVENT, ++ FAST_SYNC_AUTO_SERVER, ++ FAST_SYNC_MANUAL_SERVER, ++ FAST_SYNC_QUEUE, + }; + +-/* Create a new eventfd-based synchronization object */ +-@REQ(create_esync) +- unsigned int access; /* wanted access rights */ +- int initval; /* initial value */ +- int type; /* type of esync object */ +- int max; /* maximum count on a semaphore */ +- VARARG(objattr,object_attributes); /* object attributes */ ++ ++/* Obtain a handle to the fast synchronization device object */ ++@REQ(get_linux_sync_device) + @REPLY +- obj_handle_t handle; /* handle to the object */ +- int type; /* actual type (may be different for events) */ +- unsigned int shm_idx; ++ obj_handle_t handle; /* handle to the device */ + @END + +-@REQ(open_esync) +- unsigned int access; /* wanted access rights */ +- unsigned int attributes; /* object attributes */ +- obj_handle_t rootdir; /* root directory */ +- int type; /* type of esync object (above) */ +- VARARG(name,unicode_str); /* object name */ ++ ++/* Get the fast synchronization object associated with the given handle */ ++@REQ(get_linux_sync_obj) ++ obj_handle_t handle; /* handle to the object */ + @REPLY +- obj_handle_t handle; /* handle to the event */ +- int type; /* type of esync object (above) */ +- unsigned int shm_idx; /* this object's index into the shm section */ ++ obj_handle_t handle; /* handle to the fast synchronization object */ ++ int type; /* object type */ ++ unsigned int access; /* handle access rights */ + @END + +-/* Retrieve the esync fd for an object. */ +-@REQ(get_esync_fd) +- obj_handle_t handle; /* handle to the object */ +-@REPLY +- int type; +- unsigned int shm_idx; ++ ++/* Begin a client-side wait on a message queue */ ++@REQ(fast_select_queue) ++ obj_handle_t handle; /* handle to the queue */ + @END + +-@REQ(esync_msgwait) +- int in_msgwait; /* are we in a message wait? */ ++ ++/* End a client-side wait on a message queue */ ++@REQ(fast_unselect_queue) ++ obj_handle_t handle; /* handle to the queue */ ++ int signaled; /* was the queue signaled? */ + @END + + /* Setup keyboard auto-repeat */ + @REQ(set_keyboard_repeat) + int enable; /* whether to enable auto-repeat */ + int delay; /* auto-repeat delay in ms */ + int period; /* auto-repeat period in ms */ + @REPLY + int enable; /* previous state of auto-repeat enable */ + @END + +-/* Retrieve the fd to wait on for user APCs. 
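The replacement requests invert the old flow: instead of the client fetching an eventfd per object (get_esync_fd), it obtains one device handle per process (get_linux_sync_device) and then translates individual handles into fast-sync objects on demand (get_linux_sync_obj). A rough illustration of the request/reply pairing, with struct layouts and the server round trip faked locally rather than taken from the generated server_protocol.h:

#include <stdio.h>

typedef unsigned int obj_handle_t;

/* hypothetical C structs matching the shape protocol.def suggests for the
 * new requests; the real generated layout includes a request header */
struct get_linux_sync_device_reply { obj_handle_t handle; };  /* empty request body */

struct get_linux_sync_obj_request  { obj_handle_t handle; };
struct get_linux_sync_obj_reply
{
    obj_handle_t handle;   /* handle to the fast synchronization object */
    int          type;     /* enum fast_sync_type */
    unsigned int access;   /* access rights of the original handle */
};

/* stub standing in for a real wineserver round trip */
static void fake_server_call( const struct get_linux_sync_obj_request *req,
                              struct get_linux_sync_obj_reply *reply )
{
    reply->handle = req->handle + 0x1000;  /* pretend: freshly allocated handle */
    reply->type   = 6;                     /* FAST_SYNC_MANUAL_SERVER */
    reply->access = 0x00100000;            /* SYNCHRONIZE */
}

int main(void)
{
    /* intended flow: fetch the device once, then translate each waitable
     * handle into a fast-sync object the first time it is waited on */
    struct get_linux_sync_obj_request req = { 0x24 };
    struct get_linux_sync_obj_reply reply;
    fake_server_call( &req, &reply );
    printf( "obj %#x -> sync %#x (type %d)\n", req.handle, reply.handle, reply.type );
    return 0;
}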
*/ +-@REQ(get_esync_apc_fd) ++ ++/* Get an event handle to be used for thread alerts with fast synchronization */ ++@REQ(get_fast_alert_event) ++@REPLY ++ obj_handle_t handle; /* handle to the event */ + @END +diff --git a/server/queue.c b/server/queue.c +index fee3a8d..287a56e 100644 +--- a/server/queue.c ++++ b/server/queue.c +@@ -43,7 +43,6 @@ + #include "process.h" + #include "request.h" + #include "user.h" +-#include "esync.h" + + #define WM_NCMOUSEFIRST WM_NCMOUSEMOVE + #define WM_NCMOUSELAST (WM_NCMOUSEFIRST+(WM_MOUSELAST-WM_MOUSEFIRST)) +@@ -147,8 +146,8 @@ struct msg_queue + timeout_t last_get_msg; /* time of last get message call */ + int keystate_lock; /* owns an input keystate lock */ + unsigned int ignore_post_msg; /* ignore post messages newer than this unique id */ +- int esync_fd; /* esync file descriptor (signalled on message) */ +- int esync_in_msgwait; /* our thread is currently waiting on us */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ ++ int in_fast_wait; /* are we in a client-side wait? */ + }; + + struct hotkey +@@ -165,8 +164,8 @@ static void msg_queue_dump( struct object *obj, int verbose ); + static int msg_queue_add_queue( struct object *obj, struct wait_queue_entry *entry ); + static void msg_queue_remove_queue( struct object *obj, struct wait_queue_entry *entry ); + static int msg_queue_signaled( struct object *obj, struct wait_queue_entry *entry ); +-static int msg_queue_get_esync_fd( struct object *obj, enum esync_type *type ); + static void msg_queue_satisfied( struct object *obj, struct wait_queue_entry *entry ); ++static struct fast_sync *msg_queue_get_fast_sync( struct object *obj ); + static void msg_queue_destroy( struct object *obj ); + static void msg_queue_poll_event( struct fd *fd, int event ); + static void thread_input_dump( struct object *obj, int verbose ); +@@ -181,7 +180,6 @@ static const struct object_ops msg_queue_ops = + msg_queue_add_queue, /* add_queue */ + msg_queue_remove_queue, /* remove_queue */ + msg_queue_signaled, /* signaled */ +- msg_queue_get_esync_fd, /* get_esync_fd */ + msg_queue_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -194,6 +192,7 @@ static const struct object_ops msg_queue_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ msg_queue_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + msg_queue_destroy /* destroy */ + }; +@@ -219,7 +218,6 @@ static const struct object_ops thread_input_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -232,6 +230,7 @@ static const struct object_ops thread_input_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + thread_input_destroy /* destroy */ + }; +@@ -321,33 +320,30 @@ static struct msg_queue *create_msg_queue( struct thread *thread, struct thread_ + queue->last_get_msg = current_time; + queue->keystate_lock = 0; + queue->ignore_post_msg = 0; +- queue->esync_fd = -1; +- queue->esync_in_msgwait = 0; ++ queue->fast_sync = NULL; ++ queue->in_fast_wait = 0; + list_init( &queue->send_result ); + list_init( &queue->callback_result ); + list_init( &queue->pending_timers ); + list_init( &queue->expired_timers ); + for (i = 0; i < NB_MSG_KINDS; i++) list_init( 
&queue->msg_list[i] ); + + if (!(queue->shared = alloc_shared_object())) + { + release_object( queue ); + return NULL; + } + + SHARED_WRITE_BEGIN( queue->shared, queue_shm_t ) + { + memset( (void *)shared->hooks_count, 0, sizeof(shared->hooks_count) ); + shared->wake_mask = 0; + shared->wake_bits = 0; + shared->changed_mask = 0; + shared->changed_bits = 0; + } + SHARED_WRITE_END; + +- if (do_esync()) +- queue->esync_fd = esync_create_fd( 0, 0 ); +- + thread->queue = queue; + } + +@@ -606,7 +602,11 @@ static inline void set_queue_bits( struct msg_queue *queue, unsigned int bits ) + } + SHARED_WRITE_END; + +- if (is_signaled( queue )) wake_up( &queue->obj, 0 ); ++ if (is_signaled( queue )) ++ { ++ wake_up( &queue->obj, 0 ); ++ fast_set_event( queue->fast_sync ); ++ } + } + + /* clear some queue bits */ +@@ -620,9 +620,8 @@ static inline void clear_queue_bits( struct msg_queue *queue, unsigned int bits + if (queue->keystate_lock) unlock_input_keystate( queue->input ); + queue->keystate_lock = 0; + } +- +- if (do_esync() && !is_signaled( queue )) +- esync_clear( queue->esync_fd ); ++ if (!is_signaled( queue )) ++ fast_reset_event( queue->fast_sync ); + } + + /* check if message is matched by the filter */ +@@ -1112,8 +1111,8 @@ static int is_queue_hung( struct msg_queue *queue ) + return 0; /* thread is waiting on queue -> not hung */ + } + +- if (do_esync() && queue->esync_in_msgwait) +- return 0; /* thread is waiting on queue in absentia -> not hung */ ++ if (queue->in_fast_wait) ++ return 0; /* thread is waiting on queue in absentia -> not hung */ + + return 1; + } +@@ -1168,24 +1167,30 @@ static int msg_queue_signaled( struct object *obj, struct wait_queue_entry *entr + return ret || is_signaled( queue ); + } + +-static int msg_queue_get_esync_fd( struct object *obj, enum esync_type *type ) ++static void msg_queue_satisfied( struct object *obj, struct wait_queue_entry *entry ) + { + struct msg_queue *queue = (struct msg_queue *)obj; +- *type = ESYNC_QUEUE; +- return queue->esync_fd; ++ const queue_shm_t *queue_shm = queue->shared; ++ ++ SHARED_WRITE_BEGIN( queue_shm, queue_shm_t ) ++ { ++ shared->wake_mask = 0; ++ shared->changed_mask = 0; ++ } ++ SHARED_WRITE_END; ++ ++ fast_reset_event( queue->fast_sync ); + } + +-static void msg_queue_satisfied( struct object *obj, struct wait_queue_entry *entry ) ++static struct fast_sync *msg_queue_get_fast_sync( struct object *obj ) + { + struct msg_queue *queue = (struct msg_queue *)obj; + const queue_shm_t *queue_shm = queue->shared; + +- SHARED_WRITE_BEGIN( queue_shm, queue_shm_t ) +- { +- shared->wake_mask = 0; +- shared->changed_mask = 0; +- } +- SHARED_WRITE_END; ++ if (!queue->fast_sync) ++ queue->fast_sync = fast_create_event( FAST_SYNC_QUEUE, is_signaled( queue ) ); ++ if (queue->fast_sync) grab_object( queue->fast_sync ); ++ return queue->fast_sync; + } + + static void msg_queue_destroy( struct object *obj ) +@@ -1221,7 +1224,7 @@ static void msg_queue_destroy( struct object *obj ) + if (queue->hooks) release_object( queue->hooks ); + if (queue->fd) release_object( queue->fd ); + if (queue->shared) free_shared_object( queue->shared ); +- if (do_esync()) close( queue->esync_fd ); ++ if (queue->fast_sync) release_object( queue->fast_sync ); + } + + static void msg_queue_poll_event( struct fd *fd, int event ) +@@ -1232,6 +1235,7 @@ static void msg_queue_poll_event( struct fd *fd, int event ) + if (event & (POLLERR | POLLHUP)) set_fd_events( fd, -1 ); + else set_fd_events( queue->fd, 0 ); + wake_up( &queue->obj, 0 ); ++ fast_set_event( 
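set_queue_bits and clear_queue_bits now push the result of is_signaled into the queue's fast_sync event, so client-side and server-side waiters always agree. is_signaled itself lies outside these hunks; in Wine's queue code it boils down to a two-mask test, sketched here with the shared state trimmed to the four mask/bits words:

#include <stdio.h>

/* shared queue state as in the patch's queue_shm_t (trimmed) */
struct queue_shm
{
    unsigned int wake_mask;
    unsigned int wake_bits;
    unsigned int changed_mask;
    unsigned int changed_bits;
};

/* the test is: some awaited wake bit is set, or some watched bit changed */
static int is_signaled( const struct queue_shm *q )
{
    return (q->wake_bits & q->wake_mask) || (q->changed_bits & q->changed_mask);
}

/* the post-patch pattern: whenever bits change, push the result into the
 * mirror so client-side waiters see the same answer */
static void set_queue_bits( struct queue_shm *q, unsigned int bits, int *mirror )
{
    q->wake_bits |= bits;
    q->changed_bits |= bits;
    if (is_signaled( q )) *mirror = 1;   /* fast_set_event( queue->fast_sync ) */
}

int main(void)
{
    struct queue_shm q = { 0x0008 /* QS_POSTMESSAGE awaited */, 0, 0, 0 };
    int mirror = 0;
    set_queue_bits( &q, 0x0008, &mirror );
    printf( "signaled=%d mirror=%d\n", is_signaled( &q ), mirror );
    return 0;
}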
queue->fast_sync ); + } + + static void thread_input_dump( struct object *obj, int verbose ) +@@ -2982,11 +2982,14 @@ DECL_HANDLER(set_queue_mask) + shared->changed_mask = 0; + } + SHARED_WRITE_END; ++ ++ fast_reset_event( queue->fast_sync ); ++ } ++ else ++ { ++ wake_up( &queue->obj, 0 ); ++ fast_set_event( queue->fast_sync ); + } +- else wake_up( &queue->obj, 0 ); + } +- +- if (do_esync() && !is_signaled( queue )) +- esync_clear( queue->esync_fd ); + } + } +@@ -2867,8 +2880,8 @@ DECL_HANDLER(get_queue_status) + } + SHARED_WRITE_END; + +- if (do_esync() && !is_signaled( queue )) +- esync_clear( queue->esync_fd ); ++ if (!is_signaled( queue )) ++ fast_reset_event( queue->fast_sync ); + } + else reply->wake_bits = reply->changed_bits = 0; + } +@@ -3049,6 +3061,9 @@ DECL_HANDLER(get_message) + if (filter & QS_INPUT) queue->changed_bits &= ~QS_INPUT; + if (filter & QS_PAINT) queue->changed_bits &= ~QS_PAINT; + ++ if (!is_signaled( queue )) ++ fast_reset_event( queue->fast_sync ); ++ + /* then check for posted messages */ + if ((filter & QS_POSTMESSAGE) && + get_posted_message( queue, queue->ignore_post_msg, get_win, req->get_first, req->get_last, req->flags, reply )) +@@ -3119,11 +3134,8 @@ DECL_HANDLER(get_message) + shared->changed_mask = req->changed_mask; + } + SHARED_WRITE_END; + ++ fast_reset_event( queue->fast_sync ); + set_error( STATUS_PENDING ); /* FIXME */ +- +- if (do_esync() && !is_signaled( queue )) +- esync_clear( queue->esync_fd ); +- + return; + + found_msg: +diff --git a/server/registry.c b/server/registry.c +index 4454de3..dd5c556 100644 +--- a/server/registry.c ++++ b/server/registry.c +@@ -180,7 +180,6 @@ static const struct object_ops key_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -193,6 +192,7 @@ static const struct object_ops key_ops = + key_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + key_close_handle, /* close_handle */ + key_destroy /* destroy */ + }; +diff --git a/server/request.c b/server/request.c +index ca83fdb..8c50f99 100644 +--- a/server/request.c ++++ b/server/request.c +@@ -90,7 +90,6 @@ static const struct object_ops master_socket_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -103,6 +102,7 @@ static const struct object_ops master_socket_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + master_socket_destroy /* destroy */ + }; +diff --git a/server/request.h b/server/request.h +index eb6b8f4..1636e98 100644 +--- a/server/request.h ++++ b/server/request.h +@@ -406,12 +406,12 @@ DECL_HANDLER(terminate_job); + DECL_HANDLER(suspend_process); + DECL_HANDLER(resume_process); + DECL_HANDLER(get_next_thread); +-DECL_HANDLER(create_esync); +-DECL_HANDLER(open_esync); +-DECL_HANDLER(get_esync_fd); +-DECL_HANDLER(esync_msgwait); ++DECL_HANDLER(get_linux_sync_device); ++DECL_HANDLER(get_linux_sync_obj); ++DECL_HANDLER(fast_select_queue); ++DECL_HANDLER(fast_unselect_queue); + DECL_HANDLER(set_keyboard_repeat); +-DECL_HANDLER(get_esync_apc_fd); ++DECL_HANDLER(get_fast_alert_event); + + #ifdef WANT_REQUEST_HANDLERS + +@@ 
-704,12 +704,12 @@ static const req_handler req_handlers[REQ_NB_REQUESTS] = + (req_handler)req_suspend_process, + (req_handler)req_resume_process, + (req_handler)req_get_next_thread, +- (req_handler)req_create_esync, +- (req_handler)req_open_esync, +- (req_handler)req_get_esync_fd, +- (req_handler)req_esync_msgwait, ++ (req_handler)req_get_linux_sync_device, ++ (req_handler)req_get_linux_sync_obj, ++ (req_handler)req_fast_select_queue, ++ (req_handler)req_fast_unselect_queue, + (req_handler)req_set_keyboard_repeat, +- (req_handler)req_get_esync_apc_fd, ++ (req_handler)req_get_fast_alert_event, + }; + + C_ASSERT( sizeof(abstime_t) == 8 ); +@@ -2363,37 +2363,28 @@ C_ASSERT( FIELD_OFFSET(struct get_next_thread_request, flags) == 28 ); + C_ASSERT( sizeof(struct get_next_thread_request) == 32 ); + C_ASSERT( FIELD_OFFSET(struct get_next_thread_reply, handle) == 8 ); + C_ASSERT( sizeof(struct get_next_thread_reply) == 16 ); +-C_ASSERT( FIELD_OFFSET(struct create_esync_request, access) == 12 ); +-C_ASSERT( FIELD_OFFSET(struct create_esync_request, initval) == 16 ); +-C_ASSERT( FIELD_OFFSET(struct create_esync_request, type) == 20 ); +-C_ASSERT( FIELD_OFFSET(struct create_esync_request, max) == 24 ); +-C_ASSERT( sizeof(struct create_esync_request) == 32 ); +-C_ASSERT( FIELD_OFFSET(struct create_esync_reply, handle) == 8 ); +-C_ASSERT( FIELD_OFFSET(struct create_esync_reply, type) == 12 ); +-C_ASSERT( FIELD_OFFSET(struct create_esync_reply, shm_idx) == 16 ); +-C_ASSERT( sizeof(struct create_esync_reply) == 24 ); +-C_ASSERT( FIELD_OFFSET(struct open_esync_request, access) == 12 ); +-C_ASSERT( FIELD_OFFSET(struct open_esync_request, attributes) == 16 ); +-C_ASSERT( FIELD_OFFSET(struct open_esync_request, rootdir) == 20 ); +-C_ASSERT( FIELD_OFFSET(struct open_esync_request, type) == 24 ); +-C_ASSERT( sizeof(struct open_esync_request) == 32 ); +-C_ASSERT( FIELD_OFFSET(struct open_esync_reply, handle) == 8 ); +-C_ASSERT( FIELD_OFFSET(struct open_esync_reply, type) == 12 ); +-C_ASSERT( FIELD_OFFSET(struct open_esync_reply, shm_idx) == 16 ); +-C_ASSERT( sizeof(struct open_esync_reply) == 24 ); +-C_ASSERT( FIELD_OFFSET(struct get_esync_fd_request, handle) == 12 ); +-C_ASSERT( sizeof(struct get_esync_fd_request) == 16 ); +-C_ASSERT( FIELD_OFFSET(struct get_esync_fd_reply, type) == 8 ); +-C_ASSERT( FIELD_OFFSET(struct get_esync_fd_reply, shm_idx) == 12 ); +-C_ASSERT( sizeof(struct get_esync_fd_reply) == 16 ); +-C_ASSERT( FIELD_OFFSET(struct esync_msgwait_request, in_msgwait) == 12 ); +-C_ASSERT( sizeof(struct esync_msgwait_request) == 16 ); ++C_ASSERT( sizeof(struct get_linux_sync_device_request) == 16 ); ++C_ASSERT( FIELD_OFFSET(struct get_linux_sync_device_reply, handle) == 8 ); ++C_ASSERT( sizeof(struct get_linux_sync_device_reply) == 16 ); ++C_ASSERT( FIELD_OFFSET(struct get_linux_sync_obj_request, handle) == 12 ); ++C_ASSERT( sizeof(struct get_linux_sync_obj_request) == 16 ); ++C_ASSERT( FIELD_OFFSET(struct get_linux_sync_obj_reply, handle) == 8 ); ++C_ASSERT( FIELD_OFFSET(struct get_linux_sync_obj_reply, type) == 12 ); ++C_ASSERT( FIELD_OFFSET(struct get_linux_sync_obj_reply, access) == 16 ); ++C_ASSERT( sizeof(struct get_linux_sync_obj_reply) == 24 ); ++C_ASSERT( FIELD_OFFSET(struct fast_select_queue_request, handle) == 12 ); ++C_ASSERT( sizeof(struct fast_select_queue_request) == 16 ); ++C_ASSERT( FIELD_OFFSET(struct fast_unselect_queue_request, handle) == 12 ); ++C_ASSERT( FIELD_OFFSET(struct fast_unselect_queue_request, signaled) == 16 ); ++C_ASSERT( sizeof(struct fast_unselect_queue_request) == 24 
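The req_handlers table is a flat function-pointer array indexed by request number, so retiring the four esync requests is an in-place slot swap rather than a table reshuffle. The dispatch pattern in miniature, with hypothetical handler bodies:

#include <stdio.h>

/* one function pointer per request, indexed by the request number */
typedef void (*req_handler)( const void *req, void *reply );

enum request { REQ_get_linux_sync_device, REQ_get_linux_sync_obj, REQ_NB_REQUESTS };

static void req_get_linux_sync_device( const void *req, void *reply )
{
    (void)req; (void)reply;
    printf( "get_linux_sync_device\n" );
}

static void req_get_linux_sync_obj( const void *req, void *reply )
{
    (void)req; (void)reply;
    printf( "get_linux_sync_obj\n" );
}

static const req_handler req_handlers[REQ_NB_REQUESTS] =
{
    req_get_linux_sync_device,
    req_get_linux_sync_obj,
};

int main(void)
{
    req_handlers[REQ_get_linux_sync_obj]( NULL, NULL );  /* dispatch by number */
    return 0;
}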
); ++C_ASSERT( sizeof(struct get_fast_alert_event_request) == 16 ); ++C_ASSERT( FIELD_OFFSET(struct get_fast_alert_event_reply, handle) == 8 ); + C_ASSERT( FIELD_OFFSET(struct set_keyboard_repeat_request, enable) == 12 ); + C_ASSERT( FIELD_OFFSET(struct set_keyboard_repeat_request, delay) == 16 ); + C_ASSERT( FIELD_OFFSET(struct set_keyboard_repeat_request, period) == 20 ); + C_ASSERT( sizeof(struct set_keyboard_repeat_request) == 24 ); + C_ASSERT( FIELD_OFFSET(struct set_keyboard_repeat_reply, enable) == 8 ); + C_ASSERT( sizeof(struct set_keyboard_repeat_reply) == 16 ); +-C_ASSERT( sizeof(struct get_esync_apc_fd_request) == 16 ); ++C_ASSERT( sizeof(struct get_fast_alert_event_reply) == 16 ); + + #endif /* WANT_REQUEST_HANDLERS */ + +diff --git a/server/semaphore.c b/server/semaphore.c +index e3889f2..9940919 100644 +--- a/server/semaphore.c ++++ b/server/semaphore.c +@@ -55,12 +55,15 @@ struct semaphore + struct object obj; /* object header */ + unsigned int count; /* current count */ + unsigned int max; /* maximum possible count */ ++ struct fast_sync *fast_sync; /* fast synchronization object */ + }; + + static void semaphore_dump( struct object *obj, int verbose ); + static int semaphore_signaled( struct object *obj, struct wait_queue_entry *entry ); + static void semaphore_satisfied( struct object *obj, struct wait_queue_entry *entry ); + static int semaphore_signal( struct object *obj, unsigned int access ); ++static struct fast_sync *semaphore_get_fast_sync( struct object *obj ); ++static void semaphore_destroy( struct object *obj ); + + static const struct object_ops semaphore_ops = + { +@@ -70,7 +73,6 @@ static const struct object_ops semaphore_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + semaphore_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + semaphore_satisfied, /* satisfied */ + semaphore_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -83,8 +85,9 @@ static const struct object_ops semaphore_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ semaphore_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ +- no_destroy /* destroy */ ++ semaphore_destroy /* destroy */ + }; + + +@@ -106,6 +109,7 @@ static struct semaphore *create_semaphore( struct object *root, const struct uni + /* initialize it if it didn't already exist */ + sem->count = initial; + sem->max = max; ++ sem->fast_sync = NULL; + } + } + return sem; +@@ -168,6 +172,23 @@ static int semaphore_signal( struct object *obj, unsigned int access ) + return release_semaphore( sem, 1, NULL ); + } + ++static struct fast_sync *semaphore_get_fast_sync( struct object *obj ) ++{ ++ struct semaphore *semaphore = (struct semaphore *)obj; ++ ++ if (!semaphore->fast_sync) ++ semaphore->fast_sync = fast_create_semaphore( semaphore->count, semaphore->max ); ++ if (semaphore->fast_sync) grab_object( semaphore->fast_sync ); ++ return semaphore->fast_sync; ++} ++ ++static void semaphore_destroy( struct object *obj ) ++{ ++ struct semaphore *semaphore = (struct semaphore *)obj; ++ ++ if (semaphore->fast_sync) release_object( semaphore->fast_sync ); ++} ++ + /* create a semaphore */ + DECL_HANDLER(create_semaphore) + { +diff --git a/server/serial.c b/server/serial.c +index 11e204e..5c210d1 100644 +--- a/server/serial.c ++++ b/server/serial.c +@@ -85,7 +85,6 @@ static const struct object_ops serial_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ +- 
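The C_ASSERT block above pins down the wire layout of each new request and reply at compile time, so a silent struct-packing change breaks the build instead of the protocol. The trick is just offsetof plus a compile-time assertion; a self-contained version, with an explicit pad standing in for the generic reply header that the real generated structs carry:

#include <stddef.h>

typedef unsigned int obj_handle_t;

/* the usual definitions behind the checks above; Wine spells C_ASSERT with
 * a macro, and C11's _Static_assert does the same job */
#define FIELD_OFFSET(type, field) offsetof(type, field)
#define C_ASSERT(expr) _Static_assert( (expr), #expr )

/* a struct shaped like the wire reply: 8 bytes of reply-header space is
 * modeled with an explicit pad so the offsets line up */
struct get_linux_sync_obj_reply
{
    unsigned int pad[2];    /* stands in for the generic reply header */
    obj_handle_t handle;
    int          type;
    unsigned int access;
    unsigned int pad2;      /* tail padding up to the 24-byte wire size */
};

C_ASSERT( FIELD_OFFSET(struct get_linux_sync_obj_reply, handle) == 8 );
C_ASSERT( FIELD_OFFSET(struct get_linux_sync_obj_reply, type) == 12 );
C_ASSERT( FIELD_OFFSET(struct get_linux_sync_obj_reply, access) == 16 );
C_ASSERT( sizeof(struct get_linux_sync_obj_reply) == 24 );

int main(void) { return 0; }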
NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + serial_get_fd, /* get_fd */ +@@ -98,6 +97,7 @@ static const struct object_ops serial_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ default_fd_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + serial_destroy /* destroy */ + }; +diff --git a/server/signal.c b/server/signal.c +index 55cd6aa..e5def3d 100644 +--- a/server/signal.c ++++ b/server/signal.c +@@ -62,7 +62,6 @@ static const struct object_ops handler_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -75,6 +74,7 @@ static const struct object_ops handler_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + handler_destroy /* destroy */ + }; +diff --git a/server/sock.c b/server/sock.c +index 104321b..3fb2e29 100644 +--- a/server/sock.c ++++ b/server/sock.c +@@ -453,7 +453,6 @@ static const struct object_ops sock_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + default_fd_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + sock_get_fd, /* get_fd */ +@@ -466,6 +465,7 @@ static const struct object_ops sock_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ default_fd_get_fast_sync, /* get_fast_sync */ + sock_close_handle, /* close_handle */ + sock_destroy /* destroy */ + }; +@@ -3555,7 +3555,6 @@ static const struct object_ops ifchange_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + ifchange_get_fd, /* get_fd */ +@@ -3568,6 +3567,7 @@ static const struct object_ops ifchange_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + ifchange_destroy /* destroy */ + }; +@@ -3777,7 +3777,6 @@ static const struct object_ops socket_device_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -3790,6 +3789,7 @@ static const struct object_ops socket_device_ops = + default_unlink_name, /* unlink_name */ + socket_device_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + no_destroy /* destroy */ + }; +diff --git a/server/symlink.c b/server/symlink.c +index c7f3441..4a7cf68 100644 +--- a/server/symlink.c ++++ b/server/symlink.c +@@ -71,7 +71,6 @@ static const struct object_ops symlink_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -84,6 +83,7 @@ static const struct object_ops symlink_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + 
symlink_destroy /* destroy */ + }; +diff --git a/server/thread.c b/server/thread.c +index 2ce94b4..8f603fc 100644 +--- a/server/thread.c ++++ b/server/thread.c +@@ -50,7 +50,6 @@ + #include "request.h" + #include "user.h" + #include "security.h" +-#include "esync.h" + + + /* thread queues */ +@@ -97,7 +96,6 @@ static const struct object_ops thread_apc_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + thread_apc_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -110,6 +108,7 @@ static const struct object_ops thread_apc_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + thread_apc_destroy /* destroy */ + }; +@@ -140,7 +139,6 @@ static const struct object_ops context_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + context_signaled, /* signaled */ +- NULL, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -153,6 +151,7 @@ static const struct object_ops context_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + no_destroy /* destroy */ + }; +@@ -177,10 +176,10 @@ struct type_descr thread_type = + + static void dump_thread( struct object *obj, int verbose ); + static int thread_signaled( struct object *obj, struct wait_queue_entry *entry ); +-static int thread_get_esync_fd( struct object *obj, enum esync_type *type ); + static unsigned int thread_map_access( struct object *obj, unsigned int access ); + static void thread_poll_event( struct fd *fd, int event ); + static struct list *thread_get_kernel_obj_list( struct object *obj ); ++static struct fast_sync *thread_get_fast_sync( struct object *obj ); + static void destroy_thread( struct object *obj ); + + static const struct object_ops thread_ops = +@@ -191,7 +190,6 @@ static const struct object_ops thread_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + thread_signaled, /* signaled */ +- thread_get_esync_fd, /* get_esync_fd */ + no_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -204,6 +202,7 @@ static const struct object_ops thread_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + thread_get_kernel_obj_list, /* get_kernel_obj_list */ ++ thread_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + destroy_thread /* destroy */ + }; +@@ -231,8 +230,6 @@ static inline void init_thread_structure( struct thread *thread ) + thread->context = NULL; + thread->teb = 0; + thread->entry_point = 0; +- thread->esync_fd = -1; +- thread->esync_apc_fd = -1; + thread->system_regs = 0; + thread->queue = NULL; + thread->wait = NULL; +@@ -253,6 +250,7 @@ static inline void init_thread_structure( struct thread *thread ) + thread->token = NULL; + thread->desc = NULL; + thread->desc_len = 0; ++ thread->fast_sync = NULL; ++ thread->fast_alert_event = NULL; +- thread->exit_poll = NULL; + + thread->creation_time = current_time; +@@ -380,12 +379,6 @@ struct thread *create_thread( int fd, struct process *process, const struct secu + } + } + +- if (do_esync()) +- { +- thread->esync_fd = esync_create_fd( 0, 0 ); +- thread->esync_apc_fd = esync_create_fd( 0, 0 ); +- } +- + set_fd_events( thread->request_fd, 
POLLIN ); /* start listening to events */ + add_process_thread( thread->process, thread ); + return thread; +@@ -410,6 +403,16 @@ static struct list *thread_get_kernel_obj_list( struct object *obj ) + return &thread->kernel_object; + } + ++static struct fast_sync *thread_get_fast_sync( struct object *obj ) ++{ ++ struct thread *thread = (struct thread *)obj; ++ ++ if (!thread->fast_sync) ++ thread->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, thread->state == TERMINATED ); ++ if (thread->fast_sync) grab_object( thread->fast_sync ); ++ return thread->fast_sync; ++} ++ + /* cleanup everything that is no longer needed by a dead thread */ + /* used by destroy_thread and kill_thread */ + static void cleanup_thread( struct thread *thread ) +@@ -465,9 +468,7 @@ static void destroy_thread( struct object *obj ) +- if (thread->exit_poll) remove_timeout_user( thread->exit_poll ); + if (thread->id) free_ptid( thread->id ); + if (thread->token) release_object( thread->token ); +- +- if (do_esync()) +- close( thread->esync_fd ); ++ if (thread->fast_sync) release_object( thread->fast_sync ); ++ if (thread->fast_alert_event) release_object( thread->fast_alert_event ); + } + + /* dump a thread on stdout for debugging purposes */ +@@ -486,13 +488,6 @@ static int thread_signaled( struct object *obj, struct wait_queue_entry *entry ) +- return mythread->state == TERMINATED && !mythread->exit_poll; ++ return (mythread->state == TERMINATED); + } + +-static int thread_get_esync_fd( struct object *obj, enum esync_type *type ) +-{ +- struct thread *thread = (struct thread *)obj; +- *type = ESYNC_MANUAL_SERVER; +- return thread->esync_fd; +-} +- + static unsigned int thread_map_access( struct object *obj, unsigned int access ) + { + access = default_map_access( obj, access ); +@@ -1087,9 +1082,6 @@ void wake_up( struct object *obj, int max ) + struct list *ptr; + int ret; + +- if (do_esync()) +- esync_wake_up( obj ); +- + LIST_FOR_EACH( ptr, &obj->wait_queue ) + { + struct wait_queue_entry *entry = LIST_ENTRY( ptr, struct wait_queue_entry, entry ); +@@ -1177,8 +1169,8 @@ static int queue_apc( struct process *process, struct thread *thread, struct thr + { + wake_thread( thread ); + +- if (do_esync() && queue == &thread->user_apc) +- esync_wake_fd( thread->esync_apc_fd ); ++ if (apc->call.type == APC_USER && thread->fast_alert_event) ++ set_event( thread->fast_alert_event ); + } + + return 1; +@@ -1211,6 +1203,8 @@ void thread_cancel_apc( struct thread *thread, struct object *owner, enum apc_ty + apc->executed = 1; + wake_up( &apc->obj, 0 ); + release_object( apc ); ++ if (list_empty( &thread->user_apc ) && thread->fast_alert_event) ++ reset_event( thread->fast_alert_event ); + return; + } + } +@@ -1225,11 +1219,10 @@ static struct thread_apc *thread_dequeue_apc( struct thread *thread, int system + { + apc = LIST_ENTRY( ptr, struct thread_apc, entry ); + list_remove( ptr ); +- } +- +- if (do_esync() && list_empty( &thread->system_apc ) && list_empty( &thread->user_apc )) +- esync_clear( thread->esync_apc_fd ); + ++ if (list_empty( &thread->user_apc ) && thread->fast_alert_event) ++ reset_event( thread->fast_alert_event ); ++ } + return apc; + } + +@@ -1305,26 +1302,6 @@ + return -1; + } + +-static void check_terminated( void *arg ) +-{ +- struct thread *thread = arg; +- assert( thread->obj.ops == &thread_ops ); +- assert( thread->state == TERMINATED ); +- +- /* don't wake up until the thread is really dead, to avoid race conditions */ +- if (thread->unix_tid != -1 && !kill( thread->unix_tid, 0 )) +- { +- 
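queue_apc now sets the thread's fast_alert_event only for user APCs, and both thread_cancel_apc and thread_dequeue_apc reset it once the user APC list drains, so the event tracks exactly one condition: "a user APC is pending". A minimal model of that set/reset discipline, with plain ints standing in for the event and the list:

#include <stdio.h>

/* model of the fast_alert_event handling introduced by the patch */
struct thread_model
{
    int user_apc_count;     /* stands in for list_empty( &thread->user_apc ) */
    int alert_event;        /* stands in for the real struct event */
};

static void queue_user_apc( struct thread_model *t )
{
    t->user_apc_count++;
    t->alert_event = 1;               /* set_event( thread->fast_alert_event ) */
}

static void dequeue_user_apc( struct thread_model *t )
{
    if (!t->user_apc_count) return;
    if (!--t->user_apc_count)
        t->alert_event = 0;           /* reset_event() once the list drains */
}

int main(void)
{
    struct thread_model t = { 0, 0 };
    queue_user_apc( &t );
    queue_user_apc( &t );
    dequeue_user_apc( &t );
    printf( "after one dequeue: alert=%d\n", t.alert_event );  /* still 1 */
    dequeue_user_apc( &t );
    printf( "after drain: alert=%d\n", t.alert_event );        /* now 0 */
    return 0;
}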
thread->exit_poll = add_timeout_user( -TICKS_PER_SEC / 1000, check_terminated, thread );
+- return;
+- }
+-
+- /* grab reference since object can be destroyed while trying to wake up */
+- grab_object( &thread->obj );
+- thread->exit_poll = NULL;
+- wake_up( &thread->obj, 0 );
+- release_object( &thread->obj );
+-}
+-
+ /* kill a thread on the spot */
+ void kill_thread( struct thread *thread, int violent_death )
+ {
+@@ -1345,14 +1338,9 @@ void kill_thread( struct thread *thread, int violent_death )
+ }
+ kill_console_processes( thread, 0 );
+ abandon_mutexes( thread );
+- if (do_esync())
+- esync_abandon_mutexes( thread );
++ fast_set_event( thread->fast_sync );
++ wake_up( &thread->obj, 0 );
++ if (violent_death) send_thread_signal( thread, SIGQUIT );
+- if (violent_death)
+- {
+- send_thread_signal( thread, SIGQUIT );
+- check_terminated( thread );
+- }
+- else wake_up( &thread->obj, 0 );
+ cleanup_thread( thread );
+ remove_process_thread( thread->process, thread );
+ release_object( thread );
+@@ -2094,3 +2086,12 @@ DECL_HANDLER(get_next_thread)
+ set_error( STATUS_NO_MORE_ENTRIES );
+ release_object( process );
+ }
++
++DECL_HANDLER(get_fast_alert_event)
++{
++ if (!current->fast_alert_event)
++ current->fast_alert_event = create_event( NULL, NULL, 0, 1, !list_empty( &current->user_apc ), NULL );
++
++ if (current->fast_alert_event)
++ reply->handle = alloc_handle( current->process, current->fast_alert_event, SYNCHRONIZE, 0 );
++}
+diff --git a/server/thread.h b/server/thread.h
+index 10e9e28..cb4643a 100644
+--- a/server/thread.h
++++ b/server/thread.h
+@@ -55,8 +55,6 @@ struct thread
+ struct process *process;
+ thread_id_t id; /* thread id */
+ struct list mutex_list; /* list of currently owned mutexes */
+- int esync_fd; /* esync file descriptor (signalled on exit) */
+- int esync_apc_fd; /* esync apc fd (signalled when APCs are present) */
+ unsigned int system_regs; /* which system regs have been set */
+ struct msg_queue *queue; /* message queue */
+ struct thread_wait *wait; /* current wait condition if sleeping */
+@@ -94,6 +92,7 @@ struct thread
+ data_size_t desc_len; /* thread description length in bytes */
+ WCHAR *desc; /* thread description string */
+- struct timeout_user *exit_poll; /* poll if the thread/process has exited already */
++ struct fast_sync *fast_sync; /* fast synchronization object */
++ struct event *fast_alert_event; /* fast synchronization alert event */
+ };
+
+ extern struct thread *current;
+diff --git a/server/timer.c b/server/timer.c
+index 36645a2..854a8e1 100644
+--- a/server/timer.c
++++ b/server/timer.c
+@@ -35,7 +35,6 @@
+ #include "file.h"
+ #include "handle.h"
+ #include "request.h"
+-#include "esync.h"
+
+ static const WCHAR timer_name[] = {'T','i','m','e','r'};
+
+@@ -62,13 +61,13 @@ struct timer
+ struct thread *thread; /* thread that set the APC function */
+ client_ptr_t callback; /* callback APC function */
+ client_ptr_t arg; /* callback argument */
+- int esync_fd; /* esync file descriptor */
++ struct fast_sync *fast_sync; /* fast synchronization object */
+ };
+
+ static void timer_dump( struct object *obj, int verbose );
+ static int timer_signaled( struct object *obj, struct wait_queue_entry *entry );
+-static int timer_get_esync_fd( struct object *obj, enum esync_type *type );
+ static void timer_satisfied( struct object *obj, struct wait_queue_entry *entry );
++static struct fast_sync *timer_get_fast_sync( struct object *obj );
+ static void timer_destroy( struct object *obj );
+
+ static const struct object_ops timer_ops =
+@@ 
-79,7 +78,6 @@ static const struct object_ops timer_ops = + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + timer_signaled, /* signaled */ +- timer_get_esync_fd, /* get_esync_fd */ + timer_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -92,6 +90,7 @@ static const struct object_ops timer_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ timer_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + timer_destroy /* destroy */ + }; +@@ -114,10 +113,7 @@ static struct timer *create_timer( struct object *root, const struct unicode_str + timer->period = 0; + timer->timeout = NULL; + timer->thread = NULL; +- timer->esync_fd = -1; +- +- if (do_esync()) +- timer->esync_fd = esync_create_fd( 0, 0 ); ++ timer->fast_sync = NULL; + } + } + return timer; +@@ -159,6 +155,7 @@ static void timer_callback( void *private ) + /* wake up waiters */ + timer->signaled = 1; + wake_up( &timer->obj, 0 ); ++ fast_set_event( timer->fast_sync ); + } + + /* cancel a running timer */ +@@ -189,9 +186,7 @@ static int set_timer( struct timer *timer, timeout_t expire, unsigned int period + { + period = 0; /* period doesn't make any sense for a manual timer */ + timer->signaled = 0; +- +- if (do_esync()) +- esync_clear( timer->esync_fd ); ++ fast_reset_event( timer->fast_sync ); + } + timer->when = (expire <= 0) ? expire - monotonic_time : max( expire, current_time ); + timer->period = period; +@@ -219,18 +214,24 @@ static int timer_signaled( struct object *obj, struct wait_queue_entry *entry ) + return timer->signaled; + } + +-static int timer_get_esync_fd( struct object *obj, enum esync_type *type ) ++static void timer_satisfied( struct object *obj, struct wait_queue_entry *entry ) + { + struct timer *timer = (struct timer *)obj; +- *type = timer->manual ? ESYNC_MANUAL_SERVER : ESYNC_AUTO_SERVER; +- return timer->esync_fd; ++ assert( obj->ops == &timer_ops ); ++ if (!timer->manual) timer->signaled = 0; + } + +-static void timer_satisfied( struct object *obj, struct wait_queue_entry *entry ) ++static struct fast_sync *timer_get_fast_sync( struct object *obj ) + { + struct timer *timer = (struct timer *)obj; +- assert( obj->ops == &timer_ops ); +- if (!timer->manual) timer->signaled = 0; ++ ++ if (!timer->fast_sync) ++ { ++ enum fast_sync_type type = timer->manual ? 
FAST_SYNC_MANUAL_SERVER : FAST_SYNC_AUTO_SERVER; ++ timer->fast_sync = fast_create_event( type, timer->signaled ); ++ } ++ if (timer->fast_sync) grab_object( timer->fast_sync ); ++ return timer->fast_sync; + } + + static void timer_destroy( struct object *obj ) +@@ -240,7 +241,7 @@ static void timer_destroy( struct object *obj ) + + if (timer->timeout) remove_timeout_user( timer->timeout ); + if (timer->thread) release_object( timer->thread ); +- if (do_esync()) close( timer->esync_fd ); ++ if (timer->fast_sync) release_object( timer->fast_sync ); + } + + /* create a timer */ +diff --git a/server/token.c b/server/token.c +index 8b4d2f0..42562fd 100644 +--- a/server/token.c ++++ b/server/token.c +@@ -143,7 +143,6 @@ static const struct object_ops token_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -156,6 +155,7 @@ static const struct object_ops token_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + token_destroy /* destroy */ + }; +diff --git a/server/trace.c b/server/trace.c +index af96565..c027f4b 100644 +--- a/server/trace.c ++++ b/server/trace.c +@@ -4619,67 +4619,56 @@ static void dump_get_next_thread_reply( const struct get_next_thread_reply *req + fprintf( stderr, " handle=%04x", req->handle ); + } + +-static void dump_create_esync_request( const struct create_esync_request *req ) ++static void dump_get_linux_sync_device_request( const struct get_linux_sync_device_request *req ) + { +- fprintf( stderr, " access=%08x", req->access ); +- fprintf( stderr, ", initval=%d", req->initval ); +- fprintf( stderr, ", type=%d", req->type ); +- fprintf( stderr, ", max=%d", req->max ); +- dump_varargs_object_attributes( ", objattr=", cur_size ); + } + +-static void dump_create_esync_reply( const struct create_esync_reply *req ) ++static void dump_get_linux_sync_device_reply( const struct get_linux_sync_device_reply *req ) + { + fprintf( stderr, " handle=%04x", req->handle ); +- fprintf( stderr, ", type=%d", req->type ); +- fprintf( stderr, ", shm_idx=%08x", req->shm_idx ); + } + +-static void dump_open_esync_request( const struct open_esync_request *req ) ++static void dump_get_linux_sync_obj_request( const struct get_linux_sync_obj_request *req ) + { +- fprintf( stderr, " access=%08x", req->access ); +- fprintf( stderr, ", attributes=%08x", req->attributes ); +- fprintf( stderr, ", rootdir=%04x", req->rootdir ); +- fprintf( stderr, ", type=%d", req->type ); +- dump_varargs_unicode_str( ", name=", cur_size ); ++ fprintf( stderr, " handle=%04x", req->handle ); + } + +-static void dump_open_esync_reply( const struct open_esync_reply *req ) ++static void dump_get_linux_sync_obj_reply( const struct get_linux_sync_obj_reply *req ) + { + fprintf( stderr, " handle=%04x", req->handle ); + fprintf( stderr, ", type=%d", req->type ); +- fprintf( stderr, ", shm_idx=%08x", req->shm_idx ); ++ fprintf( stderr, ", access=%08x", req->access ); + } + +-static void dump_get_esync_fd_request( const struct get_esync_fd_request *req ) ++static void dump_fast_select_queue_request( const struct fast_select_queue_request *req ) + { + fprintf( stderr, " handle=%04x", req->handle ); + } + +-static void dump_get_esync_fd_reply( const struct get_esync_fd_reply *req ) ++static void dump_fast_unselect_queue_request( const struct 
fast_unselect_queue_request *req ) + { +- fprintf( stderr, " type=%d", req->type ); +- fprintf( stderr, ", shm_idx=%08x", req->shm_idx ); ++ fprintf( stderr, " handle=%04x", req->handle ); ++ fprintf( stderr, ", signaled=%d", req->signaled ); + } + +-static void dump_esync_msgwait_request( const struct esync_msgwait_request *req ) ++static void dump_get_fast_alert_event_request( const struct get_fast_alert_event_request *req ) + { +- fprintf( stderr, " in_msgwait=%d", req->in_msgwait ); + } + + static void dump_set_keyboard_repeat_request( const struct set_keyboard_repeat_request *req ) + { + fprintf( stderr, " enable=%d", req->enable ); + fprintf( stderr, ", delay=%d", req->delay ); + fprintf( stderr, ", period=%d", req->period ); + } + + static void dump_set_keyboard_repeat_reply( const struct set_keyboard_repeat_reply *req ) + { + fprintf( stderr, " enable=%d", req->enable ); + } + +-static void dump_get_esync_apc_fd_request( const struct get_esync_apc_fd_request *req ) ++static void dump_get_fast_alert_event_reply( const struct get_fast_alert_event_reply *req ) + { ++ fprintf( stderr, " handle=%04x", req->handle ); + } + + static const dump_func req_dumpers[REQ_NB_REQUESTS] = { +@@ -4959,11 +4948,11 @@ static const dump_func req_dumpers[REQ_NB_REQUESTS] = { + (dump_func)dump_suspend_process_request, + (dump_func)dump_resume_process_request, + (dump_func)dump_get_next_thread_request, +- (dump_func)dump_create_esync_request, +- (dump_func)dump_open_esync_request, +- (dump_func)dump_get_esync_fd_request, +- (dump_func)dump_esync_msgwait_request, +- (dump_func)dump_get_esync_apc_fd_request, ++ (dump_func)dump_get_linux_sync_device_request, ++ (dump_func)dump_get_linux_sync_obj_request, ++ (dump_func)dump_fast_select_queue_request, ++ (dump_func)dump_fast_unselect_queue_request, ++ (dump_func)dump_get_fast_alert_event_request, + }; + + static const dump_func reply_dumpers[REQ_NB_REQUESTS] = { +@@ -5254,12 +5243,12 @@ static const dump_func reply_dumpers[REQ_NB_REQUESTS] = { + NULL, + NULL, + (dump_func)dump_get_next_thread_reply, +- (dump_func)dump_create_esync_reply, +- (dump_func)dump_open_esync_reply, +- (dump_func)dump_get_esync_fd_reply, ++ (dump_func)dump_get_linux_sync_device_reply, ++ (dump_func)dump_get_linux_sync_obj_reply, + NULL, + (dump_func)dump_set_keyboard_repeat_reply, + NULL, ++ (dump_func)dump_get_fast_alert_event_reply, + }; + + static const char * const req_names[REQ_NB_REQUESTS] = { +@@ -5549,11 +5538,11 @@ static const char * const req_names[REQ_NB_REQUESTS] = { + "suspend_process", + "resume_process", + "get_next_thread", +- "create_esync", +- "open_esync", +- "get_esync_fd", +- "esync_msgwait", ++ "get_linux_sync_device", ++ "get_linux_sync_obj", ++ "fast_select_queue", ++ "fast_unselect_queue", + "set_keyboard_repeat", +- "get_esync_apc_fd", ++ "get_fast_alert_event", + }; + + static const struct +diff --git a/server/window.c b/server/window.c +index feac54e..f52e118 100644 +--- a/server/window.c ++++ b/server/window.c +@@ -108,7 +108,6 @@ static const struct object_ops window_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -121,6 +120,7 @@ static const struct object_ops window_ops = + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + window_destroy /* destroy */ + }; +diff --git 
a/server/winstation.c b/server/winstation.c +index 4ef21d9..2719c9e 100644 +--- a/server/winstation.c ++++ b/server/winstation.c +@@ -76,7 +76,6 @@ static const struct object_ops winstation_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -89,6 +88,7 @@ static const struct object_ops winstation_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + winstation_close_handle, /* close_handle */ + winstation_destroy /* destroy */ + }; +@@ -117,7 +117,6 @@ static const struct object_ops desktop_ops = + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ +- NULL, /* get_esync_fd */ + NULL, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ +@@ -130,6 +129,7 @@ static const struct object_ops desktop_ops = + default_unlink_name, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ ++ no_get_fast_sync, /* get_fast_sync */ + desktop_close_handle, /* close_handle */ + desktop_destroy /* destroy */ + }; +From 9c4c5779b6f87c27e73a4cb5a0973896c26c8d31 Mon Sep 17 00:00:00 2001 +From: Kirill Artemev +Date: Wed, 3 Jul 2024 18:39:15 +0500 +Subject: [PATCH] test3 + +Signed-off-by: Kirill Artemev +--- + server/queue.c | 55 +++++++++++++++++++++++++++++++++++++++++--------- + 1 file changed, 46 insertions(+), 9 deletions(-) + +diff --git a/server/queue.c b/server/queue.c +index 0c9c043d68f..c911ae3e574 100644 +--- a/server/queue.c ++++ b/server/queue.c +@@ -4020,21 +4020,60 @@ DECL_HANDLER(update_rawinput_devices) + } + } + +-DECL_HANDLER(esync_msgwait) ++DECL_HANDLER(fast_select_queue) + { +- struct msg_queue *queue = get_current_queue(); ++ struct msg_queue *queue; + const queue_shm_t *queue_shm; + +- if (!queue) return; ++ if (!(queue = (struct msg_queue *)get_handle_obj( current->process, req->handle, ++ SYNCHRONIZE, &msg_queue_ops ))) ++ return; + queue_shm = queue->shared; +- queue->esync_in_msgwait = req->in_msgwait; ++ /* a thread can only wait on its own queue */ ++ if (current->queue != queue || queue->in_fast_wait) ++ { ++ set_error( STATUS_ACCESS_DENIED ); ++ } ++ else ++ { ++ if (current->process->idle_event && !(queue_shm->wake_mask & QS_SMRESULT)) ++ set_event( current->process->idle_event ); ++ ++ if (queue->fd) ++ set_fd_events( queue->fd, POLLIN ); + +- if (current->process->idle_event && !(queue_shm->wake_mask & QS_SMRESULT)) +- set_event( current->process->idle_event ); ++ queue->in_fast_wait = 1; ++ } + +- /* and start/stop waiting on the driver */ +- if (queue->fd) +- set_fd_events( queue->fd, req->in_msgwait ? 
POLLIN : 0 ); ++ release_object( queue ); ++} ++ ++DECL_HANDLER(fast_unselect_queue) ++{ ++ struct msg_queue *queue; ++ const queue_shm_t *queue_shm; ++ ++ if (!(queue = (struct msg_queue *)get_handle_obj( current->process, req->handle, ++ SYNCHRONIZE, &msg_queue_ops ))) ++ return; ++ ++ queue_shm = queue->shared; ++ if (current->queue != queue || !queue->in_fast_wait) ++ { ++ set_error( STATUS_ACCESS_DENIED ); ++ } ++ else ++ { ++ if (queue->fd) ++ set_fd_events( queue->fd, 0 ); ++ ++ if (req->signaled) ++ msg_queue_satisfied( &queue->obj, NULL ); ++ ++ queue->in_fast_wait = 0; ++ } ++ ++ release_object( queue ); + } + + DECL_HANDLER(set_keyboard_repeat) +-- +2.45.2 diff --git a/wine-tkg-git/wine-tkg-patches/misc/fastsync/ntsync5-staging-protonify.patch b/wine-tkg-git/wine-tkg-patches/misc/fastsync/ntsync5-staging-protonify.patch index 1ccca045d..eaa94ef95 100644 --- a/wine-tkg-git/wine-tkg-patches/misc/fastsync/ntsync5-staging-protonify.patch +++ b/wine-tkg-git/wine-tkg-patches/misc/fastsync/ntsync5-staging-protonify.patch @@ -780,12 +780,12 @@ index edfeb03..0000000 -#pragma makedep unix -#endif - +-#include "config.h" +- -#ifndef _GNU_SOURCE -#define _GNU_SOURCE -#endif - --#include "config.h" -- -#include -#include -#include diff --git a/wine-tkg-git/wine-tkg-patches/misc/fastsync/ntsync5-staging.patch b/wine-tkg-git/wine-tkg-patches/misc/fastsync/ntsync5-staging.patch index 906acddc8..403f9678a 100644 --- a/wine-tkg-git/wine-tkg-patches/misc/fastsync/ntsync5-staging.patch +++ b/wine-tkg-git/wine-tkg-patches/misc/fastsync/ntsync5-staging.patch @@ -780,12 +780,12 @@ index edfeb03..0000000 -#pragma makedep unix -#endif - +-#include "config.h" +- -#ifndef _GNU_SOURCE -#define _GNU_SOURCE -#endif - --#include "config.h" -- -#include -#include -#include
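
Taken together, the conversion above follows one recurring shape: each waitable server object drops its eagerly created esync eventfd, and its object_ops table instead gains a get_fast_sync entry that lazily creates a refcounted fast-sync event the first time a client asks for it, seeds it with the object's current signaled state, and hands the caller an extra reference. The following is a minimal self-contained C sketch of that pattern; the refcount helpers and struct layouts here are simplified stand-ins for illustration, not the actual wineserver object machinery.

/* Illustrative sketch only: simplified stand-ins showing the lazy
 * get_fast_sync pattern used throughout this patchset. */
#include <stdio.h>
#include <stdlib.h>

enum fast_sync_type { FAST_SYNC_AUTO_SERVER, FAST_SYNC_MANUAL_SERVER };

struct fast_sync
{
    int refcount;                /* stand-in for the server's object refcount */
    enum fast_sync_type type;
    int signaled;
};

/* stand-in for fast_create_event(): the single initial reference
 * belongs to the owning server object */
static struct fast_sync *fast_create_event( enum fast_sync_type type, int signaled )
{
    struct fast_sync *fast_sync = calloc( 1, sizeof(*fast_sync) );
    if (!fast_sync) return NULL;
    fast_sync->refcount = 1;
    fast_sync->type = type;
    fast_sync->signaled = signaled;
    return fast_sync;
}

static struct fast_sync *grab_object( struct fast_sync *obj )
{
    obj->refcount++;
    return obj;
}

static void release_object( struct fast_sync *obj )
{
    if (obj && !--obj->refcount) free( obj );
}

struct timer    /* stand-in for a server object that owns a fast_sync */
{
    int manual;
    int signaled;
    struct fast_sync *fast_sync;    /* NULL until first requested */
};

/* mirrors timer_get_fast_sync() above: create on first use, seed with
 * the object's current state, return an extra reference to the caller */
static struct fast_sync *timer_get_fast_sync( struct timer *timer )
{
    if (!timer->fast_sync)
    {
        enum fast_sync_type type = timer->manual ? FAST_SYNC_MANUAL_SERVER
                                                 : FAST_SYNC_AUTO_SERVER;
        timer->fast_sync = fast_create_event( type, timer->signaled );
    }
    if (timer->fast_sync) grab_object( timer->fast_sync );
    return timer->fast_sync;
}

int main(void)
{
    struct timer timer = { .manual = 1, .signaled = 0, .fast_sync = NULL };

    struct fast_sync *a = timer_get_fast_sync( &timer );  /* created here */
    struct fast_sync *b = timer_get_fast_sync( &timer );  /* reused, re-grabbed */
    printf( "same object: %s, refcount: %d\n", a == b ? "yes" : "no", b->refcount );

    release_object( a );
    release_object( b );
    release_object( timer.fast_sync );  /* owner's reference (timer_destroy path) */
    return 0;
}

The lazy creation is the point of the design: most server objects are never waited on through the fast path, so deferring fast_create_event() to the first get_fast_sync call avoids allocating a kernel synchronization object per server object up front, while the grab/release discipline lets the handler return the same object to any number of clients and free it exactly once on the destroy path.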