Skip to content

Commit 5730b27

Browse files
committed
io_uring: move cred assignment into io_issue_sqe()
If we move it in there, then we no longer have to care about it in io-wq. This means we can drop the cred handling in io-wq, and we can drop the REQ_F_WORK_INITIALIZED flag and async init functions as that was the last user of it since we moved to the new workers. Then we can also drop io_wq_work->creds, and just hold the personality u16 in there instead. Suggested-by: Pavel Begunkov <asml.silence@gmail.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 1575f21 commit 5730b27

3 files changed

Lines changed: 22 additions & 81 deletions

File tree

fs/io-wq.c

Lines changed: 0 additions & 26 deletions
Original file line number | Diff line number | Diff line change
@@ -52,9 +52,6 @@ struct io_worker {
5252
struct io_wq_work *cur_work;
5353
spinlock_t lock;
5454

55-
const struct cred *cur_creds;
56-
const struct cred *saved_creds;
57-
5855
struct completion ref_done;
5956
struct completion started;
6057

@@ -180,11 +177,6 @@ static void io_worker_exit(struct io_worker *worker)
180177
worker->flags = 0;
181178
preempt_enable();
182179

183-
if (worker->saved_creds) {
184-
revert_creds(worker->saved_creds);
185-
worker->cur_creds = worker->saved_creds = NULL;
186-
}
187-
188180
raw_spin_lock_irq(&wqe->lock);
189181
if (flags & IO_WORKER_F_FREE)
190182
hlist_nulls_del_rcu(&worker->nulls_node);
@@ -326,10 +318,6 @@ static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
326318
worker->flags |= IO_WORKER_F_FREE;
327319
hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
328320
}
329-
if (worker->saved_creds) {
330-
revert_creds(worker->saved_creds);
331-
worker->cur_creds = worker->saved_creds = NULL;
332-
}
333321
}
334322

335323
static inline unsigned int io_get_work_hash(struct io_wq_work *work)
@@ -404,18 +392,6 @@ static void io_flush_signals(void)
404392
}
405393
}
406394

407-
static void io_wq_switch_creds(struct io_worker *worker,
408-
struct io_wq_work *work)
409-
{
410-
const struct cred *old_creds = override_creds(work->creds);
411-
412-
worker->cur_creds = work->creds;
413-
if (worker->saved_creds)
414-
put_cred(old_creds); /* creds set by previous switch */
415-
else
416-
worker->saved_creds = old_creds;
417-
}
418-
419395
static void io_assign_current_work(struct io_worker *worker,
420396
struct io_wq_work *work)
421397
{
@@ -465,8 +441,6 @@ static void io_worker_handle_work(struct io_worker *worker)
465441
unsigned int hash = io_get_work_hash(work);
466442

467443
next_hashed = wq_next_work(work);
468-
if (work->creds && worker->cur_creds != work->creds)
469-
io_wq_switch_creds(worker, work);
470444
wq->do_work(work);
471445
io_assign_current_work(worker, NULL);
472446

fs/io-wq.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -79,8 +79,8 @@ static inline void wq_list_del(struct io_wq_work_list *list,
7979

8080
struct io_wq_work {
8181
struct io_wq_work_node list;
82-
const struct cred *creds;
8382
unsigned flags;
83+
unsigned short personality;
8484
};
8585

8686
static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)

fs/io_uring.c

Lines changed: 21 additions & 54 deletions
Original file line number | Diff line number | Diff line change
@@ -688,7 +688,6 @@ enum {
688688
REQ_F_POLLED_BIT,
689689
REQ_F_BUFFER_SELECTED_BIT,
690690
REQ_F_NO_FILE_TABLE_BIT,
691-
REQ_F_WORK_INITIALIZED_BIT,
692691
REQ_F_LTIMEOUT_ACTIVE_BIT,
693692
REQ_F_COMPLETE_INLINE_BIT,
694693

@@ -730,8 +729,6 @@ enum {
730729
REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
731730
/* doesn't need file table for this request */
732731
REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
733-
/* io_wq_work is initialized */
734-
REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT),
735732
/* linked timeout is active, i.e. prepared by link's head */
736733
REQ_F_LTIMEOUT_ACTIVE = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
737734
/* completion is deferred through io_comp_state */
@@ -1094,24 +1091,6 @@ static inline void req_set_fail_links(struct io_kiocb *req)
10941091
req->flags |= REQ_F_FAIL_LINK;
10951092
}
10961093

1097-
static inline void __io_req_init_async(struct io_kiocb *req)
1098-
{
1099-
memset(&req->work, 0, sizeof(req->work));
1100-
req->flags |= REQ_F_WORK_INITIALIZED;
1101-
}
1102-
1103-
/*
1104-
* Note: must call io_req_init_async() for the first time you
1105-
* touch any members of io_wq_work.
1106-
*/
1107-
static inline void io_req_init_async(struct io_kiocb *req)
1108-
{
1109-
if (req->flags & REQ_F_WORK_INITIALIZED)
1110-
return;
1111-
1112-
__io_req_init_async(req);
1113-
}
1114-
11151094
static void io_ring_ctx_ref_free(struct percpu_ref *ref)
11161095
{
11171096
struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
@@ -1196,13 +1175,6 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq)
11961175

11971176
static void io_req_clean_work(struct io_kiocb *req)
11981177
{
1199-
if (!(req->flags & REQ_F_WORK_INITIALIZED))
1200-
return;
1201-
1202-
if (req->work.creds) {
1203-
put_cred(req->work.creds);
1204-
req->work.creds = NULL;
1205-
}
12061178
if (req->flags & REQ_F_INFLIGHT) {
12071179
struct io_ring_ctx *ctx = req->ctx;
12081180
struct io_uring_task *tctx = req->task->io_uring;
@@ -1215,16 +1187,13 @@ static void io_req_clean_work(struct io_kiocb *req)
12151187
if (atomic_read(&tctx->in_idle))
12161188
wake_up(&tctx->wait);
12171189
}
1218-
1219-
req->flags &= ~REQ_F_WORK_INITIALIZED;
12201190
}
12211191

12221192
static void io_req_track_inflight(struct io_kiocb *req)
12231193
{
12241194
struct io_ring_ctx *ctx = req->ctx;
12251195

12261196
if (!(req->flags & REQ_F_INFLIGHT)) {
1227-
io_req_init_async(req);
12281197
req->flags |= REQ_F_INFLIGHT;
12291198

12301199
spin_lock_irq(&ctx->inflight_lock);
@@ -1238,8 +1207,6 @@ static void io_prep_async_work(struct io_kiocb *req)
12381207
const struct io_op_def *def = &io_op_defs[req->opcode];
12391208
struct io_ring_ctx *ctx = req->ctx;
12401209

1241-
io_req_init_async(req);
1242-
12431210
if (req->flags & REQ_F_FORCE_ASYNC)
12441211
req->work.flags |= IO_WQ_WORK_CONCURRENT;
12451212

@@ -1250,8 +1217,6 @@ static void io_prep_async_work(struct io_kiocb *req)
12501217
if (def->unbound_nonreg_file)
12511218
req->work.flags |= IO_WQ_WORK_UNBOUND;
12521219
}
1253-
if (!req->work.creds)
1254-
req->work.creds = get_current_cred();
12551220
}
12561221

12571222
static void io_prep_async_link(struct io_kiocb *req)
@@ -3578,7 +3543,6 @@ static int __io_splice_prep(struct io_kiocb *req,
35783543
* Splice operation will be punted async, and here need to
35793544
* modify io_wq_work.flags, so initialize io_wq_work firstly.
35803545
*/
3581-
io_req_init_async(req);
35823546
req->work.flags |= IO_WQ_WORK_UNBOUND;
35833547
}
35843548

@@ -5935,8 +5899,22 @@ static void __io_clean_op(struct io_kiocb *req)
59355899
static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
59365900
{
59375901
struct io_ring_ctx *ctx = req->ctx;
5902+
const struct cred *creds = NULL;
59385903
int ret;
59395904

5905+
if (req->work.personality) {
5906+
const struct cred *new_creds;
5907+
5908+
if (!(issue_flags & IO_URING_F_NONBLOCK))
5909+
mutex_lock(&ctx->uring_lock);
5910+
new_creds = idr_find(&ctx->personality_idr, req->work.personality);
5911+
if (!(issue_flags & IO_URING_F_NONBLOCK))
5912+
mutex_unlock(&ctx->uring_lock);
5913+
if (!new_creds)
5914+
return -EINVAL;
5915+
creds = override_creds(new_creds);
5916+
}
5917+
59405918
switch (req->opcode) {
59415919
case IORING_OP_NOP:
59425920
ret = io_nop(req, issue_flags);
@@ -6043,6 +6021,9 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
60436021
break;
60446022
}
60456023

6024+
if (creds)
6025+
revert_creds(creds);
6026+
60466027
if (ret)
60476028
return ret;
60486029

@@ -6206,18 +6187,10 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
62066187
static void __io_queue_sqe(struct io_kiocb *req)
62076188
{
62086189
struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
6209-
const struct cred *old_creds = NULL;
62106190
int ret;
62116191

6212-
if ((req->flags & REQ_F_WORK_INITIALIZED) && req->work.creds &&
6213-
req->work.creds != current_cred())
6214-
old_creds = override_creds(req->work.creds);
6215-
62166192
ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
62176193

6218-
if (old_creds)
6219-
revert_creds(old_creds);
6220-
62216194
/*
62226195
* We async punt it if the file wasn't marked NOWAIT, or if the file
62236196
* doesn't support non-blocking read/write attempts
@@ -6304,7 +6277,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
63046277
{
63056278
struct io_submit_state *state;
63066279
unsigned int sqe_flags;
6307-
int id, ret = 0;
6280+
int ret = 0;
63086281

63096282
req->opcode = READ_ONCE(sqe->opcode);
63106283
/* same numerical values with corresponding REQ_F_*, safe to copy */
@@ -6336,15 +6309,9 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
63366309
!io_op_defs[req->opcode].buffer_select)
63376310
return -EOPNOTSUPP;
63386311

6339-
id = READ_ONCE(sqe->personality);
6340-
if (id) {
6341-
__io_req_init_async(req);
6342-
req->work.creds = idr_find(&ctx->personality_idr, id);
6343-
if (unlikely(!req->work.creds))
6344-
return -EINVAL;
6345-
get_cred(req->work.creds);
6346-
}
6347-
6312+
req->work.list.next = NULL;
6313+
req->work.flags = 0;
6314+
req->work.personality = READ_ONCE(sqe->personality);
63486315
state = &ctx->submit_state;
63496316

63506317
/*

0 commit comments

Comments (0)