Commit 80b06ad
futex: implementation of WM (wait multiple)
Javinator9889 committed Nov 16, 2020
1 parent 4d30ad8 commit 80b06ad
Showing 2 changed files with 67 additions and 58 deletions.
118 changes: 65 additions & 53 deletions include/uapi/linux/futex.h
@@ -21,6 +21,7 @@
#define FUTEX_WAKE_BITSET 10
#define FUTEX_WAIT_REQUEUE_PI 11
#define FUTEX_CMP_REQUEUE_PI 12
#define FUTEX_WAIT_MULTIPLE 31

#define FUTEX_PRIVATE_FLAG 128
#define FUTEX_CLOCK_REALTIME 256
@@ -41,59 +42,6 @@
#define FUTEX_CMP_REQUEUE_PI_PRIVATE (FUTEX_CMP_REQUEUE_PI | \
FUTEX_PRIVATE_FLAG)

/*
* Support for robust futexes: the kernel cleans up held futexes at
* thread exit time.
*/

/*
* Per-lock list entry - embedded in user-space locks, somewhere close
* to the futex field. (Note: user-space uses a double-linked list to
* achieve O(1) list add and remove, but the kernel only needs to know
* about the forward link)
*
* NOTE: this structure is part of the syscall ABI, and must not be
* changed.
*/
struct robust_list {
struct robust_list __user *next;
};

/*
* Per-thread list head:
*
* NOTE: this structure is part of the syscall ABI, and must only be
* changed if the change is first communicated with the glibc folks.
* (When an incompatible change is done, we'll increase the structure
* size, which glibc will detect)
*/
struct robust_list_head {
/*
* The head of the list. Points back to itself if empty:
*/
struct robust_list list;

/*
* This relative offset is set by user-space, it gives the kernel
* the relative position of the futex field to examine. This way
* we keep userspace flexible, to freely shape its data-structure,
* without hardcoding any particular offset into the kernel:
*/
long futex_offset;

/*
* The death of the thread may race with userspace setting
* up a lock's links. So to handle this race, userspace first
* sets this field to the address of the to-be-taken lock,
* then does the lock acquire, and then adds itself to the
* list, and then clears this field. Hence the kernel will
* always have full knowledge of all locks that the thread
* _might_ have taken. We check the owner TID in any case,
* so only truly owned locks will be handled.
*/
struct robust_list __user *list_op_pending;
};

/*
* Are there any waiters for this robust futex:
*/
@@ -150,4 +98,68 @@ struct robust_list_head
(((op & 0xf) << 28) | ((cmp & 0xf) << 24) \
| ((oparg & 0xfff) << 12) | (cmparg & 0xfff))

/*
* Include here the ASM files so all required defs are present
*/
#include <asm/futex.h>

/*
* Support for robust futexes: the kernel cleans up held futexes at
* thread exit time.
*/

/*
* Per-lock list entry - embedded in user-space locks, somewhere close
* to the futex field. (Note: user-space uses a double-linked list to
* achieve O(1) list add and remove, but the kernel only needs to know
* about the forward link)
*
* NOTE: this structure is part of the syscall ABI, and must not be
* changed.
*/
struct robust_list {
struct robust_list __user *next;
};

/*
* Per-thread list head:
*
* NOTE: this structure is part of the syscall ABI, and must only be
* changed if the change is first communicated with the glibc folks.
* (When an incompatible change is done, we'll increase the structure
* size, which glibc will detect)
*/
struct robust_list_head {
/*
* The head of the list. Points back to itself if empty:
*/
struct robust_list list;

/*
* This relative offset is set by user-space, it gives the kernel
* the relative position of the futex field to examine. This way
* we keep userspace flexible, to freely shape its data-structure,
* without hardcoding any particular offset into the kernel:
*/
long futex_offset;

/*
* The death of the thread may race with userspace setting
* up a lock's links. So to handle this race, userspace first
* sets this field to the address of the to-be-taken lock,
* then does the lock acquire, and then adds itself to the
* list, and then clears this field. Hence the kernel will
* always have full knowledge of all locks that the thread
* _might_ have taken. We check the owner TID in any case,
* so only truly owned locks will be handled.
*/
struct robust_list __user *list_op_pending;
};
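
Editorial sketch, not part of this commit: one way user space might follow the list_op_pending protocol described in the comment above. It installs a robust_list_head with set_robust_list(2), announces each lock before taking it, links it into the list, then clears the hint. The lock layout (struct my_lock), the helper names, and the bare compare-and-swap loop are illustrative assumptions; real programs normally rely on glibc's robust pthread mutexes, which manage this list themselves.

#include <linux/futex.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

struct my_lock {                      /* hypothetical user-space lock */
	struct robust_list link;      /* entry the kernel walks at thread exit */
	uint32_t futex;               /* holds the owner TID, 0 if free */
};

static struct robust_list_head head;

static void install_robust_list(void)
{
	head.list.next = &head.list;  /* empty list points back to itself */
	/* Tell the kernel where the futex word sits relative to the link. */
	head.futex_offset = offsetof(struct my_lock, futex) -
			    offsetof(struct my_lock, link);
	head.list_op_pending = NULL;
	syscall(SYS_set_robust_list, &head, sizeof(head));
}

static void robust_lock(struct my_lock *l, uint32_t tid)
{
	/* The ordering the comment above relies on: announce the lock we
	 * are about to take, take it, link it, then clear the hint. */
	head.list_op_pending = &l->link;
	while (__sync_val_compare_and_swap(&l->futex, 0, tid) != 0)
		;                     /* a real lock would FUTEX_WAIT here */
	l->link.next = head.list.next;
	head.list.next = &l->link;
	head.list_op_pending = NULL;
}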

struct futex_wait_block {
__u32 __user *uaddr;
__u32 val;
__u32 bitset;
};

#endif /* _UAPI_LINUX_FUTEX_H */
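
For context, a hedged usage sketch of the new FUTEX_WAIT_MULTIPLE operation (not part of this commit). It assumes the patched uapi header above is installed, so that struct futex_wait_block and FUTEX_WAIT_MULTIPLE are visible, and it assumes the calling convention of the out-of-tree wait-multiple series: uaddr points to the array of wait blocks, val holds the number of entries, and the return value indexes the entry that was woken. None of this is documented mainline uapi.

#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static long futex_wait_multiple(struct futex_wait_block *blocks,
				unsigned int count,
				const struct timespec *timeout)
{
	/* Assumed convention: blocks array as uaddr, count as val. */
	return syscall(SYS_futex, blocks, FUTEX_WAIT_MULTIPLE, count,
		       timeout, NULL, 0);
}

/* Block until either of two futex words no longer holds its expected value. */
static long wait_on_either(uint32_t *a, uint32_t *b)
{
	struct futex_wait_block blocks[2] = {
		{ .uaddr = a, .val = *a, .bitset = FUTEX_BITSET_MATCH_ANY },
		{ .uaddr = b, .val = *b, .bitset = FUTEX_BITSET_MATCH_ANY },
	};

	return futex_wait_multiple(blocks, 2, NULL);
}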
7 changes: 2 additions & 5 deletions kernel/futex.c
@@ -40,8 +40,7 @@
#include <linux/memblock.h>
#include <linux/fault-inject.h>
#include <linux/time_namespace.h>

#include <asm/futex.h>
#include <linux/futex.h>

#include "locking/rtmutex_common.h"

@@ -166,10 +165,8 @@ static int __read_mostly futex_cmpxchg_enabled;
#endif
#define FLAGS_CLOCKRT 0x02
#define FLAGS_HAS_TIMEOUT 0x04
#define FLAGS_WAKE_MULTIPLE 0x08

struct futex_pi_state {
/*
* list of 'owned' pi_state instances - these have to be