From 80b06ad2fd1928509b359da32e70fe906b751605 Mon Sep 17 00:00:00 2001
From: Javinator9889
Date: Mon, 16 Nov 2020 19:21:30 +0100
Subject: [PATCH] futex: implementation of WM (wait multiple)

---
 include/uapi/linux/futex.h | 118 ++++++++++++++++++++-----------------
 kernel/futex.c             |   7 +--
 2 files changed, 67 insertions(+), 58 deletions(-)

diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h
index a89eb0accd5e..d634a4dec799 100644
--- a/include/uapi/linux/futex.h
+++ b/include/uapi/linux/futex.h
@@ -21,6 +21,7 @@
 #define FUTEX_WAKE_BITSET	10
 #define FUTEX_WAIT_REQUEUE_PI	11
 #define FUTEX_CMP_REQUEUE_PI	12
+#define FUTEX_WAIT_MULTIPLE	31
 
 #define FUTEX_PRIVATE_FLAG	128
 #define FUTEX_CLOCK_REALTIME	256
@@ -41,59 +42,6 @@
 #define FUTEX_CMP_REQUEUE_PI_PRIVATE	(FUTEX_CMP_REQUEUE_PI | \
 					 FUTEX_PRIVATE_FLAG)
 
-/*
- * Support for robust futexes: the kernel cleans up held futexes at
- * thread exit time.
- */
-
-/*
- * Per-lock list entry - embedded in user-space locks, somewhere close
- * to the futex field. (Note: user-space uses a double-linked list to
- * achieve O(1) list add and remove, but the kernel only needs to know
- * about the forward link)
- *
- * NOTE: this structure is part of the syscall ABI, and must not be
- * changed.
- */
-struct robust_list {
-	struct robust_list __user *next;
-};
-
-/*
- * Per-thread list head:
- *
- * NOTE: this structure is part of the syscall ABI, and must only be
- * changed if the change is first communicated with the glibc folks.
- * (When an incompatible change is done, we'll increase the structure
- * size, which glibc will detect)
- */
-struct robust_list_head {
-	/*
-	 * The head of the list. Points back to itself if empty:
-	 */
-	struct robust_list list;
-
-	/*
-	 * This relative offset is set by user-space, it gives the kernel
-	 * the relative position of the futex field to examine. This way
-	 * we keep userspace flexible, to freely shape its data-structure,
-	 * without hardcoding any particular offset into the kernel:
-	 */
-	long futex_offset;
-
-	/*
-	 * The death of the thread may race with userspace setting
-	 * up a lock's links. So to handle this race, userspace first
-	 * sets this field to the address of the to-be-taken lock,
-	 * then does the lock acquire, and then adds itself to the
-	 * list, and then clears this field. Hence the kernel will
-	 * always have full knowledge of all locks that the thread
-	 * _might_ have taken. We check the owner TID in any case,
-	 * so only truly owned locks will be handled.
-	 */
-	struct robust_list __user *list_op_pending;
-};
-
 /*
  * Are there any waiters for this robust futex:
  */
@@ -150,4 +98,68 @@ struct robust_list_head {
 	(((op & 0xf) << 28) | ((cmp & 0xf) << 24)		\
 	 | ((oparg & 0xfff) << 12) | (cmparg & 0xfff))
 
+/*
+ * Include here the ASM files so all required defs are present
+ */
+#include
+
+/*
+ * Support for robust futexes: the kernel cleans up held futexes at
+ * thread exit time.
+ */
+
+/*
+ * Per-lock list entry - embedded in user-space locks, somewhere close
+ * to the futex field. (Note: user-space uses a double-linked list to
+ * achieve O(1) list add and remove, but the kernel only needs to know
+ * about the forward link)
+ *
+ * NOTE: this structure is part of the syscall ABI, and must not be
+ * changed.
+ */
+struct robust_list {
+	struct robust_list __user *next;
+};
+
+/*
+ * Per-thread list head:
+ *
+ * NOTE: this structure is part of the syscall ABI, and must only be
+ * changed if the change is first communicated with the glibc folks.
+ * (When an incompatible change is done, we'll increase the structure
+ * size, which glibc will detect)
+ */
+struct robust_list_head {
+	/*
+	 * The head of the list. Points back to itself if empty:
+	 */
+	struct robust_list list;
+
+	/*
+	 * This relative offset is set by user-space, it gives the kernel
+	 * the relative position of the futex field to examine. This way
+	 * we keep userspace flexible, to freely shape its data-structure,
+	 * without hardcoding any particular offset into the kernel:
+	 */
+	long futex_offset;
+
+	/*
+	 * The death of the thread may race with userspace setting
+	 * up a lock's links. So to handle this race, userspace first
+	 * sets this field to the address of the to-be-taken lock,
+	 * then does the lock acquire, and then adds itself to the
+	 * list, and then clears this field. Hence the kernel will
+	 * always have full knowledge of all locks that the thread
+	 * _might_ have taken. We check the owner TID in any case,
+	 * so only truly owned locks will be handled.
+	 */
+	struct robust_list __user *list_op_pending;
+};
+
+struct futex_wait_block {
+	__u32 __user *uaddr;
+	__u32 val;
+	__u32 bitset;
+};
+
 #endif /* _UAPI_LINUX_FUTEX_H */
diff --git a/kernel/futex.c b/kernel/futex.c
index 18ea765a0f4d..0f781f5880a9 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -40,8 +40,7 @@
 #include
 #include
 #include
-
-#include
+#include
 
 #include "locking/rtmutex_common.h"
 
@@ -166,10 +165,8 @@ static int __read_mostly futex_cmpxchg_enabled;
 #endif
 #define FLAGS_CLOCKRT		0x02
 #define FLAGS_HAS_TIMEOUT	0x04
-<<<<<<< HEAD
 #define FLAGS_WAKE_MULTIPLE	0x08
-=======
- */
+
 struct futex_pi_state {
 	/*
 	 * list of 'owned' pi_state instances - these have to be
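
What follows is a minimal, untested userspace sketch of how FUTEX_WAIT_MULTIPLE is expected to be driven once this header change is in place. The calling convention shown here (uaddr pointing to an array of struct futex_wait_block, val carrying the number of entries, a relative timeout, and the woken entry reported via the return value) follows the related out-of-tree "wait multiple" series and is an assumption, not something this patch itself defines; the fallback macro and the local struct copy exist only so the example builds against unpatched headers.

#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

#ifndef FUTEX_WAIT_MULTIPLE
/* Fallback for unpatched headers; the value mirrors the hunk above. */
#define FUTEX_WAIT_MULTIPLE	31

/* Userspace view of the wait block added to include/uapi/linux/futex.h. */
struct futex_wait_block {
	uint32_t *uaddr;	/* futex word to wait on */
	uint32_t val;		/* expected value of *uaddr */
	uint32_t bitset;	/* wake bitset; FUTEX_BITSET_MATCH_ANY matches any waker */
};
#endif

static uint32_t futex_a, futex_b;

int main(void)
{
	struct futex_wait_block blocks[] = {
		{ &futex_a, 0, FUTEX_BITSET_MATCH_ANY },
		{ &futex_b, 0, FUTEX_BITSET_MATCH_ANY },
	};
	struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };

	/*
	 * Block until one of the futexes is woken, one of the expected
	 * values no longer matches, or the timeout expires.  Only the
	 * error/success distinction is checked here, since the exact
	 * return-value semantics are not spelled out by this patch.
	 */
	long ret = syscall(SYS_futex, blocks,
			   FUTEX_WAIT_MULTIPLE | FUTEX_PRIVATE_FLAG,
			   sizeof(blocks) / sizeof(blocks[0]),
			   &timeout, NULL, 0);
	if (ret < 0)
		perror("futex(FUTEX_WAIT_MULTIPLE)");
	else
		printf("woken, ret=%ld\n", ret);

	return 0;
}

The point of batching the wait blocks into a single call is that one thread can sleep on several futexes at once instead of dedicating a waiter thread per futex word, which is the usage pattern this opcode is meant to serve.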