@@ -688,7 +688,6 @@ enum {
688688 REQ_F_POLLED_BIT ,
689689 REQ_F_BUFFER_SELECTED_BIT ,
690690 REQ_F_NO_FILE_TABLE_BIT ,
691- REQ_F_WORK_INITIALIZED_BIT ,
692691 REQ_F_LTIMEOUT_ACTIVE_BIT ,
693692 REQ_F_COMPLETE_INLINE_BIT ,
694693
@@ -730,8 +729,6 @@ enum {
730729 REQ_F_BUFFER_SELECTED = BIT (REQ_F_BUFFER_SELECTED_BIT ),
731730 /* doesn't need file table for this request */
732731 REQ_F_NO_FILE_TABLE = BIT (REQ_F_NO_FILE_TABLE_BIT ),
733- /* io_wq_work is initialized */
734- REQ_F_WORK_INITIALIZED = BIT (REQ_F_WORK_INITIALIZED_BIT ),
735732 /* linked timeout is active, i.e. prepared by link's head */
736733 REQ_F_LTIMEOUT_ACTIVE = BIT (REQ_F_LTIMEOUT_ACTIVE_BIT ),
737734 /* completion is deferred through io_comp_state */
@@ -1094,24 +1091,6 @@ static inline void req_set_fail_links(struct io_kiocb *req)
10941091 req -> flags |= REQ_F_FAIL_LINK ;
10951092}
10961093
1097- static inline void __io_req_init_async (struct io_kiocb * req )
1098- {
1099- memset (& req -> work , 0 , sizeof (req -> work ));
1100- req -> flags |= REQ_F_WORK_INITIALIZED ;
1101- }
1102-
1103- /*
1104- * Note: must call io_req_init_async() for the first time you
1105- * touch any members of io_wq_work.
1106- */
1107- static inline void io_req_init_async (struct io_kiocb * req )
1108- {
1109- if (req -> flags & REQ_F_WORK_INITIALIZED )
1110- return ;
1111-
1112- __io_req_init_async (req );
1113- }
1114-
11151094static void io_ring_ctx_ref_free (struct percpu_ref * ref )
11161095{
11171096 struct io_ring_ctx * ctx = container_of (ref , struct io_ring_ctx , refs );
@@ -1196,13 +1175,6 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq)
11961175
11971176static void io_req_clean_work (struct io_kiocb * req )
11981177{
1199- if (!(req -> flags & REQ_F_WORK_INITIALIZED ))
1200- return ;
1201-
1202- if (req -> work .creds ) {
1203- put_cred (req -> work .creds );
1204- req -> work .creds = NULL ;
1205- }
12061178 if (req -> flags & REQ_F_INFLIGHT ) {
12071179 struct io_ring_ctx * ctx = req -> ctx ;
12081180 struct io_uring_task * tctx = req -> task -> io_uring ;
@@ -1215,16 +1187,13 @@ static void io_req_clean_work(struct io_kiocb *req)
12151187 if (atomic_read (& tctx -> in_idle ))
12161188 wake_up (& tctx -> wait );
12171189 }
1218-
1219- req -> flags &= ~REQ_F_WORK_INITIALIZED ;
12201190}
12211191
12221192static void io_req_track_inflight (struct io_kiocb * req )
12231193{
12241194 struct io_ring_ctx * ctx = req -> ctx ;
12251195
12261196 if (!(req -> flags & REQ_F_INFLIGHT )) {
1227- io_req_init_async (req );
12281197 req -> flags |= REQ_F_INFLIGHT ;
12291198
12301199 spin_lock_irq (& ctx -> inflight_lock );
@@ -1238,8 +1207,6 @@ static void io_prep_async_work(struct io_kiocb *req)
12381207 const struct io_op_def * def = & io_op_defs [req -> opcode ];
12391208 struct io_ring_ctx * ctx = req -> ctx ;
12401209
1241- io_req_init_async (req );
1242-
12431210 if (req -> flags & REQ_F_FORCE_ASYNC )
12441211 req -> work .flags |= IO_WQ_WORK_CONCURRENT ;
12451212
@@ -1250,8 +1217,6 @@ static void io_prep_async_work(struct io_kiocb *req)
12501217 if (def -> unbound_nonreg_file )
12511218 req -> work .flags |= IO_WQ_WORK_UNBOUND ;
12521219 }
1253- if (!req -> work .creds )
1254- req -> work .creds = get_current_cred ();
12551220}
12561221
12571222static void io_prep_async_link (struct io_kiocb * req )
@@ -3578,7 +3543,6 @@ static int __io_splice_prep(struct io_kiocb *req,
35783543 * Splice operation will be punted async, and here need to
35793544 * modify io_wq_work.flags, so initialize io_wq_work firstly.
35803545 */
3581- io_req_init_async (req );
35823546 req -> work .flags |= IO_WQ_WORK_UNBOUND ;
35833547 }
35843548
@@ -5935,8 +5899,22 @@ static void __io_clean_op(struct io_kiocb *req)
59355899static int io_issue_sqe (struct io_kiocb * req , unsigned int issue_flags )
59365900{
59375901 struct io_ring_ctx * ctx = req -> ctx ;
5902+ const struct cred * creds = NULL ;
59385903 int ret ;
59395904
5905+ if (req -> work .personality ) {
5906+ const struct cred * new_creds ;
5907+
5908+ if (!(issue_flags & IO_URING_F_NONBLOCK ))
5909+ mutex_lock (& ctx -> uring_lock );
5910+ new_creds = idr_find (& ctx -> personality_idr , req -> work .personality );
5911+ if (!(issue_flags & IO_URING_F_NONBLOCK ))
5912+ mutex_unlock (& ctx -> uring_lock );
5913+ if (!new_creds )
5914+ return - EINVAL ;
5915+ creds = override_creds (new_creds );
5916+ }
5917+
59405918 switch (req -> opcode ) {
59415919 case IORING_OP_NOP :
59425920 ret = io_nop (req , issue_flags );
@@ -6043,6 +6021,9 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
60436021 break ;
60446022 }
60456023
6024+ if (creds )
6025+ revert_creds (creds );
6026+
60466027 if (ret )
60476028 return ret ;
60486029
@@ -6206,18 +6187,10 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
62066187static void __io_queue_sqe (struct io_kiocb * req )
62076188{
62086189 struct io_kiocb * linked_timeout = io_prep_linked_timeout (req );
6209- const struct cred * old_creds = NULL ;
62106190 int ret ;
62116191
6212- if ((req -> flags & REQ_F_WORK_INITIALIZED ) && req -> work .creds &&
6213- req -> work .creds != current_cred ())
6214- old_creds = override_creds (req -> work .creds );
6215-
62166192 ret = io_issue_sqe (req , IO_URING_F_NONBLOCK |IO_URING_F_COMPLETE_DEFER );
62176193
6218- if (old_creds )
6219- revert_creds (old_creds );
6220-
62216194 /*
62226195 * We async punt it if the file wasn't marked NOWAIT, or if the file
62236196 * doesn't support non-blocking read/write attempts
@@ -6304,7 +6277,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
63046277{
63056278 struct io_submit_state * state ;
63066279 unsigned int sqe_flags ;
6307- int id , ret = 0 ;
6280+ int ret = 0 ;
63086281
63096282 req -> opcode = READ_ONCE (sqe -> opcode );
63106283 /* same numerical values with corresponding REQ_F_*, safe to copy */
@@ -6336,15 +6309,9 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
63366309 !io_op_defs [req -> opcode ].buffer_select )
63376310 return - EOPNOTSUPP ;
63386311
6339- id = READ_ONCE (sqe -> personality );
6340- if (id ) {
6341- __io_req_init_async (req );
6342- req -> work .creds = idr_find (& ctx -> personality_idr , id );
6343- if (unlikely (!req -> work .creds ))
6344- return - EINVAL ;
6345- get_cred (req -> work .creds );
6346- }
6347-
6312+ req -> work .list .next = NULL ;
6313+ req -> work .flags = 0 ;
6314+ req -> work .personality = READ_ONCE (sqe -> personality );
63486315 state = & ctx -> submit_state ;
63496316
63506317 /*
0 commit comments