The following commit has been merged in the master branch:

commit f7fe12385b5bf4fa954bfd9c2a4db4b8cf4232f0
Merge: 2f2b88eaa9e017807c57b63f8ffe5e2627dbe80c 9461af22c4e4b900d7025153a93b81460c6ac405
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Tue Nov 2 16:01:22 2021 +1100

    Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace.git

    # Conflicts:
    #	drivers/staging/r8188eu/core/rtw_mp.c
diff --combined arch/ia64/include/asm/ptrace.h
index 8a2d0f72b324,f15504f75f10..a10a498eede1
--- a/arch/ia64/include/asm/ptrace.h
+++ b/arch/ia64/include/asm/ptrace.h
@@@ -51,11 -51,6 +51,11 @@@
   * the canonical representation by adding to instruction pointer.
   */
  # define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
 +# define instruction_pointer_set(regs, val)	\
 +({						\
 +	ia64_psr(regs)->ri = (val & 0xf);	\
 +	regs->cr_iip = (val & ~0xfULL);		\
 +})
  
  static inline unsigned long user_stack_pointer(struct pt_regs *regs)
  {
@@@ -134,9 -129,9 +134,9 @@@ static inline long regs_return_value(st
  extern void ia64_decrement_ip (struct pt_regs *pt);
  
  extern void ia64_ptrace_stop(void);
- #define arch_ptrace_stop(code, info) \
+ #define arch_ptrace_stop() \
  	ia64_ptrace_stop()
- #define arch_ptrace_stop_needed(code, info) \
+ #define arch_ptrace_stop_needed() \
  	(!test_thread_flag(TIF_RESTORE_RSE))
  
  extern void ptrace_attach_sync_user_rbs (struct task_struct *);
diff --combined arch/m68k/kernel/traps.c
index 9718ce94cc84,74045d164ddb..99058a6da956
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@@ -181,8 -181,9 +181,8 @@@ static inline void access_error060 (str
  static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
  {
  	unsigned long mmusr;
- 	mm_segment_t old_fs = get_fs();
  
- 	set_fs(MAKE_MM_SEG(wbs));
+ 	set_fc(wbs);
  
  	if (iswrite)
  		asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
@@@ -191,7 -192,7 +191,7 @@@
  
  	asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
  
- 	set_fs(old_fs);
+ 	set_fc(USER_DATA);
  
  	return mmusr;
  }
@@@ -200,8 -201,10 +200,8 @@@ static inline int do_040writeback1(unsi
  				   unsigned long wbd)
  {
  	int res = 0;
- 	mm_segment_t old_fs = get_fs();
  
- 	/* set_fs can not be moved, otherwise put_user() may oops */
- 	set_fs(MAKE_MM_SEG(wbs));
+ 	set_fc(wbs);
  
  	switch (wbs & WBSIZ_040) {
  	case BA_SIZE_BYTE:
@@@ -215,7 -218,9 +215,7 @@@
  		break;
  	}
  
- 	/* set_fs can not be moved, otherwise put_user() may oops */
- 	set_fs(old_fs);
- 
+ 	set_fc(USER_DATA);
  
  	pr_debug("do_040writeback1, res=%d\n", res);
  
@@@ -1145,7 -1150,7 +1145,7 @@@ asmlinkage void set_esp0(unsigned long
   */
  asmlinkage void fpsp040_die(void)
  {
- 	force_sigsegv(SIGSEGV);
+ 	force_fatal_sig(SIGSEGV);
  }
  #ifdef CONFIG_M68KFPU_EMU
diff --combined arch/powerpc/kernel/signal_32.c
index 38c3eae40c14,933ab95805a6..00a9c9cd6d42
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@@ -1048,7 -1048,7 +1048,7 @@@ SYSCALL_DEFINE3(swapcontext, struct uco
  	if (new_ctx == NULL)
  		return 0;
  	if (!access_ok(new_ctx, ctx_size) ||
- 	    fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
+ 	    fault_in_readable((char __user *)new_ctx, ctx_size))
  		return -EFAULT;
  
  	/*
@@@ -1062,8 -1062,10 +1062,10 @@@
  	 * or if another thread unmaps the region containing the context.
  	 * We kill the task with a SIGSEGV in this situation.
  	 */
- 	if (do_setcontext(new_ctx, regs, 0))
- 		do_exit(SIGSEGV);
+ 	if (do_setcontext(new_ctx, regs, 0)) {
+ 		force_fatal_sig(SIGSEGV);
+ 		return -EFAULT;
+ 	}
  
  	set_thread_flag(TIF_RESTOREALL);
  	return 0;
@@@ -1237,7 -1239,7 +1239,7 @@@ SYSCALL_DEFINE3(debug_setcontext, struc
  #endif
  
  	if (!access_ok(ctx, sizeof(*ctx)) ||
- 	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
+ 	    fault_in_readable((char __user *)ctx, sizeof(*ctx)))
  		return -EFAULT;
  
  	/*
diff --combined arch/powerpc/kernel/signal_64.c
index 9f471b4a11e3,8ead9b3f47c6..ef518535d436
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@@ -688,7 -688,7 +688,7 @@@ SYSCALL_DEFINE3(swapcontext, struct uco
  	if (new_ctx == NULL)
  		return 0;
  	if (!access_ok(new_ctx, ctx_size) ||
- 	    fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
+ 	    fault_in_readable((char __user *)new_ctx, ctx_size))
  		return -EFAULT;
  
  	/*
@@@ -703,15 -703,18 +703,18 @@@
  	 * We kill the task with a SIGSEGV in this situation.
  	 */
  
- 	if (__get_user_sigset(&set, &new_ctx->uc_sigmask))
- 		do_exit(SIGSEGV);
+ 	if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) {
+ 		force_fatal_sig(SIGSEGV);
+ 		return -EFAULT;
+ 	}
  	set_current_blocked(&set);
  
  	if (!user_read_access_begin(new_ctx, ctx_size))
  		return -EFAULT;
  	if (__unsafe_restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) {
  		user_read_access_end();
- 		do_exit(SIGSEGV);
+ 		force_fatal_sig(SIGSEGV);
+ 		return -EFAULT;
  	}
  	user_read_access_end();
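The powerpc and m68k hunks above, and the s390 ones just below, all replace do_exit(SIGSEGV) in syscall and trap error paths with force_fatal_sig(SIGSEGV). As a rough sketch of that pattern (the demo_swapcontext() wrapper and the restore_user_context() helper are made up for illustration; only force_fatal_sig() is the real API the patches use), the syscall queues an unblockable fatal signal and returns an error, and ordinary signal delivery then terminates the task:

static long demo_swapcontext(struct ucontext __user *new_ctx)
{
	/* hypothetical restore helper standing in for do_setcontext() and friends */
	if (restore_user_context(new_ctx)) {
		force_fatal_sig(SIGSEGV);	/* queue an unblockable, fatal SIGSEGV */
		return -EFAULT;			/* unwind normally; signal delivery kills the task */
	}
	return 0;
}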
diff --combined arch/s390/kernel/dumpstack.c
index 85f326e258df,f45e66b8bed6..0681c55e831d
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@@ -152,7 -152,7 +152,7 @@@ void show_stack(struct task_struct *tas
  static void show_last_breaking_event(struct pt_regs *regs)
  {
  	printk("Last Breaking-Event-Address:\n");
- 	printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
+ 	printk(" [<%016lx>] %pSR\n", regs->last_break, (void *)regs->last_break);
  }
  
  void show_registers(struct pt_regs *regs)
@@@ -192,7 -192,7 +192,7 @@@ void show_regs(struct pt_regs *regs
  
  static DEFINE_SPINLOCK(die_lock);
  
- void die(struct pt_regs *regs, const char *str)
+ void __noreturn die(struct pt_regs *regs, const char *str)
  {
  	static int die_counter;
  
diff --combined arch/s390/kernel/traps.c
index 6c6f7dcce1a5,01a7c68dcfb6..035705c9f23e
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@@ -84,7 -84,7 +84,7 @@@ static void default_trap_handler(struc
  {
  	if (user_mode(regs)) {
  		report_user_fault(regs, SIGSEGV, 0);
- 		do_exit(SIGSEGV);
+ 		force_fatal_sig(SIGSEGV);
  	} else
  		die(regs, "Unknown program exception");
  }
@@@ -300,6 -300,7 +300,6 @@@ static void (*pgm_check_table[128])(str
  
  void noinstr __do_pgm_check(struct pt_regs *regs)
  {
- 	unsigned long last_break = S390_lowcore.breaking_event_addr;
  	unsigned int trapnr;
  	irqentry_state_t state;
  
@@@ -310,11 -311,10 +310,11 @@@
  
  	if (user_mode(regs)) {
  		update_timer_sys();
- 		if (last_break < 4096)
- 			last_break = 1;
- 		current->thread.last_break = last_break;
- 		regs->args[0] = last_break;
+ 		if (!static_branch_likely(&cpu_has_bear)) {
+ 			if (regs->last_break < 4096)
+ 				regs->last_break = 1;
+ 		}
+ 		current->thread.last_break = regs->last_break;
  	}
  
  	if (S390_lowcore.pgm_code & 0x0200) {
diff --combined arch/xtensa/kernel/traps.c
index 35a7d47f28cf,fb056a191339..4b4dbeb2d612
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@@ -97,9 -97,7 +97,9 @@@ static dispatch_init_table_t __initdat
  /* EXCCAUSE_INSTRUCTION_FETCH unhandled */
  /* EXCCAUSE_LOAD_STORE_ERROR unhandled*/
  { EXCCAUSE_LEVEL1_INTERRUPT,	0,	   do_interrupt },
 +#ifdef SUPPORT_WINDOWED
  { EXCCAUSE_ALLOCA,		USER|KRNL, fast_alloca },
 +#endif
  /* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
  /* EXCCAUSE_PRIVILEGED unhandled */
  #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
@@@ -464,10 -462,12 +464,10 @@@ void secondary_trap_init(void
  
  void show_regs(struct pt_regs * regs)
  {
- 	int i, wmask;
+ 	int i;
  
  	show_regs_print_info(KERN_DEFAULT);
  
- 	wmask = regs->wmask & ~1;
- 
  	for (i = 0; i < 16; i++) {
  		if ((i % 8) == 0)
  			pr_info("a%02d:", i);
@@@ -527,7 -527,7 +527,7 @@@ void show_stack(struct task_struct *tas
  
  DEFINE_SPINLOCK(die_lock);
  
- void die(const char * str, struct pt_regs * regs, long err)
+ void __noreturn die(const char * str, struct pt_regs * regs, long err)
  {
  	static int die_counter;
  	const char *pr = "";
diff --combined drivers/staging/r8188eu/core/rtw_cmd.c
index 5d5f25364b2f,d37c9463eecc..48869a7056fd
--- a/drivers/staging/r8188eu/core/rtw_cmd.c
+++ b/drivers/staging/r8188eu/core/rtw_cmd.c
@@@ -9,8 -9,6 +9,8 @@@
  #include "../include/mlme_osdep.h"
  #include "../include/rtw_br_ext.h"
  #include "../include/rtw_mlme_ext.h"
 +#include "../include/rtl8188e_dm.h"
 +#include "../include/rtl8188e_sreset.h"
/* Caller and the rtw_cmd_thread can protect cmd_q by spin_lock. @@@ -21,12 -19,11 +21,12 @@@ static int _rtw_init_cmd_priv(struct cm { int res = _SUCCESS;
- sema_init(&pcmdpriv->cmd_queue_sema, 0); + init_completion(&pcmdpriv->enqueue_cmd); /* sema_init(&(pcmdpriv->cmd_done_sema), 0); */ - sema_init(&pcmdpriv->terminate_cmdthread_sema, 0); + init_completion(&pcmdpriv->start_cmd_thread); + init_completion(&pcmdpriv->stop_cmd_thread);
- _rtw_init_queue(&pcmdpriv->cmd_queue); + rtw_init_queue(&pcmdpriv->cmd_queue);
/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
@@@ -170,6 -167,16 +170,6 @@@ static int rtw_cmd_filter(struct cmd_pr { u8 bAllow = false; /* set to true to allow enqueuing cmd when hw_init_completed is false */
- /* To decide allow or not */ - if ((pcmdpriv->padapter->pwrctrlpriv.bHWPwrPindetect) && - (!pcmdpriv->padapter->registrypriv.usbss_enable)) { - if (cmd_obj->cmdcode == GEN_CMD_CODE(_Set_Drv_Extra)) { - struct drvextra_cmd_parm *pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)cmd_obj->parmbuf; - if (pdrvextra_cmd_parm->ec_id == POWER_SAVING_CTRL_WK_CID) - bAllow = true; - } - } - if (cmd_obj->cmdcode == GEN_CMD_CODE(_SetChannelPlan)) bAllow = true;
@@@ -198,7 -205,7 +198,7 @@@ u32 rtw_enqueue_cmd(struct cmd_priv *pc res = _rtw_enqueue_cmd(&pcmdpriv->cmd_queue, cmd_obj);
if (res == _SUCCESS) - up(&pcmdpriv->cmd_queue_sema); + complete(&pcmdpriv->enqueue_cmd);
exit:
@@@ -214,6 -221,14 +214,6 @@@ struct cmd_obj *rtw_dequeue_cmd(struct return cmd_obj; }
-void rtw_cmd_clr_isr(struct cmd_priv *pcmdpriv) -{ - - pcmdpriv->cmd_done_cnt++; - /* up(&(pcmdpriv->cmd_done_sema)); */ - -} - void rtw_free_cmd_obj(struct cmd_obj *pcmd) {
@@@ -244,14 -259,23 +244,14 @@@ int rtw_cmd_thread(void *context struct adapter *padapter = (struct adapter *)context; struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- thread_enter("RTW_CMD_THREAD"); - pcmdbuf = pcmdpriv->cmd_buf;
pcmdpriv->cmdthd_running = true; - up(&pcmdpriv->terminate_cmdthread_sema); + complete(&pcmdpriv->start_cmd_thread);
while (1) { - if (_rtw_down_sema(&pcmdpriv->cmd_queue_sema) == _FAIL) - break; + wait_for_completion(&pcmdpriv->enqueue_cmd);
- if (padapter->bDriverStopped || - padapter->bSurpriseRemoved) { - DBG_88E("%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n", - __func__, padapter->bDriverStopped, padapter->bSurpriseRemoved, __LINE__); - break; - } _next: if (padapter->bDriverStopped || padapter->bSurpriseRemoved) { @@@ -321,11 -345,43 +321,11 @@@ post_process rtw_free_cmd_obj(pcmd); } while (1);
- up(&pcmdpriv->terminate_cmdthread_sema); + complete(&pcmdpriv->stop_cmd_thread);
- thread_exit(); + return 0; }
-u8 rtw_setstandby_cmd(struct adapter *padapter, uint action) -{ - struct cmd_obj *ph2c; - struct usb_suspend_parm *psetusbsuspend; - struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - - u8 ret = _SUCCESS; - - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); - if (!ph2c) { - ret = _FAIL; - goto exit; - } - - psetusbsuspend = kzalloc(sizeof(struct usb_suspend_parm), GFP_ATOMIC); - if (!psetusbsuspend) { - kfree(ph2c); - ret = _FAIL; - goto exit; - } - - psetusbsuspend->action = action; - - init_h2fwcmd_w_parm_no_rsp(ph2c, psetusbsuspend, GEN_CMD_CODE(_SetUsbSuspend)); - - ret = rtw_enqueue_cmd(pcmdpriv, ph2c); - -exit: - - return ret; -} - /* rtw_sitesurvey_cmd(~) ### NOTE:#### (!!!!) @@@ -435,12 -491,228 +435,12 @@@ exit return res; }
-u8 rtw_setbasicrate_cmd(struct adapter *padapter, u8 *rateset) -{ - struct cmd_obj *ph2c; - struct setbasicrate_parm *pssetbasicratepara; - struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - u8 res = _SUCCESS; - - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); - if (!ph2c) { - res = _FAIL; - goto exit; - } - pssetbasicratepara = kzalloc(sizeof(struct setbasicrate_parm), GFP_ATOMIC); - - if (!pssetbasicratepara) { - kfree(ph2c); - res = _FAIL; - goto exit; - } - - init_h2fwcmd_w_parm_no_rsp(ph2c, pssetbasicratepara, _SetBasicRate_CMD_); - - memcpy(pssetbasicratepara->basicrates, rateset, NumRates); - - res = rtw_enqueue_cmd(pcmdpriv, ph2c); -exit: - - return res; -} - -/* -unsigned char rtw_setphy_cmd(unsigned char *adapter) - -1. be called only after rtw_update_registrypriv_dev_network(~) or mp testing program -2. for AdHoc/Ap mode or mp mode? - -*/ -u8 rtw_setphy_cmd(struct adapter *padapter, u8 modem, u8 ch) -{ - struct cmd_obj *ph2c; - struct setphy_parm *psetphypara; - struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - u8 res = _SUCCESS; - - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); - if (!ph2c) { - res = _FAIL; - goto exit; - } - psetphypara = kzalloc(sizeof(struct setphy_parm), GFP_ATOMIC); - - if (!psetphypara) { - kfree(ph2c); - res = _FAIL; - goto exit; - } - - init_h2fwcmd_w_parm_no_rsp(ph2c, psetphypara, _SetPhy_CMD_); - - psetphypara->modem = modem; - psetphypara->rfchannel = ch; - - res = rtw_enqueue_cmd(pcmdpriv, ph2c); -exit: - - return res; -} - -u8 rtw_setbbreg_cmd(struct adapter *padapter, u8 offset, u8 val) -{ - struct cmd_obj *ph2c; - struct writeBB_parm *pwritebbparm; - struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - u8 res = _SUCCESS; - - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); - if (!ph2c) { - res = _FAIL; - goto exit; - } - pwritebbparm = kzalloc(sizeof(struct writeBB_parm), GFP_ATOMIC); - - if (!pwritebbparm) { - kfree(ph2c); - res = _FAIL; - goto exit; - } - - init_h2fwcmd_w_parm_no_rsp(ph2c, pwritebbparm, GEN_CMD_CODE(_SetBBReg)); - - pwritebbparm->offset = offset; - pwritebbparm->value = val; - - res = rtw_enqueue_cmd(pcmdpriv, ph2c); -exit: - - return res; -} - -u8 rtw_getbbreg_cmd(struct adapter *padapter, u8 offset, u8 *pval) -{ - struct cmd_obj *ph2c; - struct readBB_parm *prdbbparm; - struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - u8 res = _SUCCESS; - - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); - if (!ph2c) { - res = _FAIL; - goto exit; - } - prdbbparm = kzalloc(sizeof(struct readBB_parm), GFP_ATOMIC); - - if (!prdbbparm) { - kfree(ph2c); - return _FAIL; - } - - INIT_LIST_HEAD(&ph2c->list); - ph2c->cmdcode = GEN_CMD_CODE(_GetBBReg); - ph2c->parmbuf = (unsigned char *)prdbbparm; - ph2c->cmdsz = sizeof(struct readBB_parm); - ph2c->rsp = pval; - ph2c->rspsz = sizeof(struct readBB_rsp); - - prdbbparm->offset = offset; - - res = rtw_enqueue_cmd(pcmdpriv, ph2c); -exit: - - return res; -} - -u8 rtw_setrfreg_cmd(struct adapter *padapter, u8 offset, u32 val) -{ - struct cmd_obj *ph2c; - struct writeRF_parm *pwriterfparm; - struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - u8 res = _SUCCESS; - - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); - if (!ph2c) { - res = _FAIL; - goto exit; - } - pwriterfparm = kzalloc(sizeof(struct writeRF_parm), GFP_ATOMIC); - - if (!pwriterfparm) { - kfree(ph2c); - res = _FAIL; - goto exit; - } - - init_h2fwcmd_w_parm_no_rsp(ph2c, pwriterfparm, GEN_CMD_CODE(_SetRFReg)); - - pwriterfparm->offset = offset; - pwriterfparm->value = val; - - res = rtw_enqueue_cmd(pcmdpriv, ph2c); -exit: - - return 
res; -} - -u8 rtw_getrfreg_cmd(struct adapter *padapter, u8 offset, u8 *pval) -{ - struct cmd_obj *ph2c; - struct readRF_parm *prdrfparm; - struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - u8 res = _SUCCESS; - - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); - if (!ph2c) { - res = _FAIL; - goto exit; - } - - prdrfparm = kzalloc(sizeof(struct readRF_parm), GFP_ATOMIC); - if (!prdrfparm) { - kfree(ph2c); - res = _FAIL; - goto exit; - } - - INIT_LIST_HEAD(&ph2c->list); - ph2c->cmdcode = GEN_CMD_CODE(_GetRFReg); - ph2c->parmbuf = (unsigned char *)prdrfparm; - ph2c->cmdsz = sizeof(struct readRF_parm); - ph2c->rsp = pval; - ph2c->rspsz = sizeof(struct readRF_rsp); - - prdrfparm->offset = offset; - - res = rtw_enqueue_cmd(pcmdpriv, ph2c); - -exit: - - return res; -} - void rtw_getbbrfreg_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd) {
kfree(pcmd->parmbuf); kfree(pcmd); - - if (padapter->registrypriv.mp_mode == 1) - padapter->mppriv.workparam.bcompleted = true; - -} - -void rtw_readtssi_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd) -{ - - - kfree(pcmd->parmbuf); - kfree(pcmd); - - if (padapter->registrypriv.mp_mode == 1) - padapter->mppriv.workparam.bcompleted = true; - }
u8 rtw_createbss_cmd(struct adapter *padapter) @@@ -471,6 -743,32 +471,6 @@@ exit return res; }
-u8 rtw_createbss_cmd_ex(struct adapter *padapter, unsigned char *pbss, unsigned int sz) -{ - struct cmd_obj *pcmd; - struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - u8 res = _SUCCESS; - - pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); - if (!pcmd) { - res = _FAIL; - goto exit; - } - - INIT_LIST_HEAD(&pcmd->list); - pcmd->cmdcode = GEN_CMD_CODE(_CreateBss); - pcmd->parmbuf = pbss; - pcmd->cmdsz = sz; - pcmd->rsp = NULL; - pcmd->rspsz = 0; - - res = rtw_enqueue_cmd(pcmdpriv, pcmd); - -exit: - - return res; -} - u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork) { u8 res = _SUCCESS; @@@ -775,6 -1073,115 +775,6 @@@ exit return res; }
-u8 rtw_setrttbl_cmd(struct adapter *padapter, struct setratable_parm *prate_table) -{ - struct cmd_obj *ph2c; - struct setratable_parm *psetrttblparm; - struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - u8 res = _SUCCESS; - - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); - if (!ph2c) { - res = _FAIL; - goto exit; - } - psetrttblparm = kzalloc(sizeof(struct setratable_parm), GFP_KERNEL); - - if (!psetrttblparm) { - kfree(ph2c); - res = _FAIL; - goto exit; - } - - init_h2fwcmd_w_parm_no_rsp(ph2c, psetrttblparm, GEN_CMD_CODE(_SetRaTable)); - - memcpy(psetrttblparm, prate_table, sizeof(struct setratable_parm)); - - res = rtw_enqueue_cmd(pcmdpriv, ph2c); -exit: - - return res; -} - -u8 rtw_getrttbl_cmd(struct adapter *padapter, struct getratable_rsp *pval) -{ - struct cmd_obj *ph2c; - struct getratable_parm *pgetrttblparm; - struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - u8 res = _SUCCESS; - - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); - if (!ph2c) { - res = _FAIL; - goto exit; - } - pgetrttblparm = kzalloc(sizeof(struct getratable_parm), GFP_KERNEL); - - if (!pgetrttblparm) { - kfree(ph2c); - res = _FAIL; - goto exit; - } - -/* init_h2fwcmd_w_parm_no_rsp(ph2c, psetrttblparm, GEN_CMD_CODE(_SetRaTable)); */ - - INIT_LIST_HEAD(&ph2c->list); - ph2c->cmdcode = GEN_CMD_CODE(_GetRaTable); - ph2c->parmbuf = (unsigned char *)pgetrttblparm; - ph2c->cmdsz = sizeof(struct getratable_parm); - ph2c->rsp = (u8 *)pval; - ph2c->rspsz = sizeof(struct getratable_rsp); - - pgetrttblparm->rsvd = 0x0; - - res = rtw_enqueue_cmd(pcmdpriv, ph2c); -exit: - - return res; -} - -u8 rtw_setassocsta_cmd(struct adapter *padapter, u8 *mac_addr) -{ - struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - struct cmd_obj *ph2c; - struct set_assocsta_parm *psetassocsta_para; - struct set_stakey_rsp *psetassocsta_rsp = NULL; - - u8 res = _SUCCESS; - - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); - if (!ph2c) { - res = _FAIL; - goto exit; - } - - psetassocsta_para = kzalloc(sizeof(struct set_assocsta_parm), GFP_ATOMIC); - if (!psetassocsta_para) { - kfree(ph2c); - res = _FAIL; - goto exit; - } - - psetassocsta_rsp = kzalloc(sizeof(struct set_assocsta_rsp), GFP_ATOMIC); - if (!psetassocsta_rsp) { - kfree(ph2c); - kfree(psetassocsta_para); - return _FAIL; - } - - init_h2fwcmd_w_parm_no_rsp(ph2c, psetassocsta_para, _SetAssocSta_CMD_); - ph2c->rsp = (u8 *)psetassocsta_rsp; - ph2c->rspsz = sizeof(struct set_assocsta_rsp); - - memcpy(psetassocsta_para->addr, mac_addr, ETH_ALEN); - - res = rtw_enqueue_cmd(pcmdpriv, ph2c); - -exit: - - return res; - } - u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr) { struct cmd_priv *pcmdpriv = &padapter->cmdpriv; @@@ -843,6 -1250,57 +843,6 @@@ exit return res; }
-u8 rtw_set_ch_cmd(struct adapter *padapter, u8 ch, u8 bw, u8 ch_offset, u8 enqueue) -{ - struct cmd_obj *pcmdobj; - struct set_ch_parm *set_ch_parm; - struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - - u8 res = _SUCCESS; - - DBG_88E(FUNC_NDEV_FMT" ch:%u, bw:%u, ch_offset:%u\n", - FUNC_NDEV_ARG(padapter->pnetdev), ch, bw, ch_offset); - - /* check input parameter */ - - /* prepare cmd parameter */ - set_ch_parm = kzalloc(sizeof(*set_ch_parm), GFP_ATOMIC); - if (!set_ch_parm) { - res = _FAIL; - goto exit; - } - set_ch_parm->ch = ch; - set_ch_parm->bw = bw; - set_ch_parm->ch_offset = ch_offset; - - if (enqueue) { - /* need enqueue, prepare cmd_obj and enqueue */ - pcmdobj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); - if (!pcmdobj) { - kfree(set_ch_parm); - res = _FAIL; - goto exit; - } - - init_h2fwcmd_w_parm_no_rsp(pcmdobj, set_ch_parm, GEN_CMD_CODE(_SetChannel)); - res = rtw_enqueue_cmd(pcmdpriv, pcmdobj); - } else { - /* no need to enqueue, do the cmd hdl directly and free cmd parameter */ - if (H2C_SUCCESS != set_ch_hdl(padapter, (u8 *)set_ch_parm)) - res = _FAIL; - - kfree(set_ch_parm); - } - - /* do something based on res... */ - -exit: - - DBG_88E(FUNC_NDEV_FMT" res:%u\n", FUNC_NDEV_ARG(padapter->pnetdev), res); - - return res; -} - u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue) { struct cmd_obj *pcmdobj; @@@ -894,6 -1352,74 +894,6 @@@ exit return res; }
-u8 rtw_led_blink_cmd(struct adapter *padapter, struct LED_871x *pLed) -{ - struct cmd_obj *pcmdobj; - struct LedBlink_param *ledBlink_param; - struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - - u8 res = _SUCCESS; - - pcmdobj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); - if (!pcmdobj) { - res = _FAIL; - goto exit; - } - - ledBlink_param = kzalloc(sizeof(struct LedBlink_param), GFP_ATOMIC); - if (!ledBlink_param) { - kfree(pcmdobj); - res = _FAIL; - goto exit; - } - - ledBlink_param->pLed = pLed; - - init_h2fwcmd_w_parm_no_rsp(pcmdobj, ledBlink_param, GEN_CMD_CODE(_LedBlink)); - res = rtw_enqueue_cmd(pcmdpriv, pcmdobj); - -exit: - - return res; -} - -u8 rtw_set_csa_cmd(struct adapter *padapter, u8 new_ch_no) -{ - struct cmd_obj *pcmdobj; - struct SetChannelSwitch_param *setChannelSwitch_param; - struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - - u8 res = _SUCCESS; - - pcmdobj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); - if (!pcmdobj) { - res = _FAIL; - goto exit; - } - - setChannelSwitch_param = kzalloc(sizeof(struct SetChannelSwitch_param), - GFP_ATOMIC); - if (!setChannelSwitch_param) { - kfree(pcmdobj); - res = _FAIL; - goto exit; - } - - setChannelSwitch_param->new_ch_no = new_ch_no; - - init_h2fwcmd_w_parm_no_rsp(pcmdobj, setChannelSwitch_param, GEN_CMD_CODE(_SetChannelSwitch)); - res = rtw_enqueue_cmd(pcmdpriv, pcmdobj); - -exit: - - return res; -} - -u8 rtw_tdls_cmd(struct adapter *padapter, u8 *addr, u8 option) -{ - return _SUCCESS; -} - static void traffic_status_watchdog(struct adapter *padapter) { u8 bEnterPS; @@@ -960,15 -1486,17 +960,15 @@@ static void dynamic_chk_wk_hdl(struct a padapter = (struct adapter *)pbuf; pmlmepriv = &padapter->mlmepriv;
-#ifdef CONFIG_88EU_AP_MODE if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) expire_timeout_chk(padapter); -#endif
- rtw_hal_sreset_xmit_status_check(padapter); + rtl8188e_sreset_xmit_status_check(padapter);
linked_status_chk(padapter); traffic_status_watchdog(padapter);
- rtw_hal_dm_watchdog(padapter); + rtl8188e_HalDmWatchDog(padapter); }
static void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type) @@@ -995,12 -1523,12 +995,12 @@@ mstatus = 1;/* connect */ /* Reset LPS Setting */ padapter->pwrctrlpriv.LpsIdleCount = 0; - rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus)); + SetHwReg8188EU(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus)); break; case LPS_CTRL_DISCONNECT: mstatus = 0;/* disconnect */ LPS_Leave(padapter); - rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus)); + SetHwReg8188EU(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus)); break; case LPS_CTRL_SPECIAL_PACKET: /* DBG_88E("LPS_CTRL_SPECIAL_PACKET\n"); */ @@@ -1060,7 -1588,7 +1060,7 @@@ exit
static void rpt_timer_setting_wk_hdl(struct adapter *padapter, u16 min_time) { - rtw_hal_set_hwreg(padapter, HW_VAR_RPT_TIMER_SETTING, (u8 *)(&min_time)); + SetHwReg8188EU(padapter, HW_VAR_RPT_TIMER_SETTING, (u8 *)(&min_time)); }
u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time) @@@ -1097,7 -1625,7 +1097,7 @@@ exit
static void antenna_select_wk_hdl(struct adapter *padapter, u8 antenna) { - rtw_hal_set_hwreg(padapter, HW_VAR_ANTENNA_DIVERSITY_SELECT, (u8 *)(&antenna)); + SetHwReg8188EU(padapter, HW_VAR_ANTENNA_DIVERSITY_SELECT, (u8 *)(&antenna)); }
u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue) @@@ -1108,7 -1636,7 +1108,7 @@@ u8 support_ant_div; u8 res = _SUCCESS;
- rtw_hal_get_def_var(padapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &support_ant_div); + GetHalDefVar8188EUsb(padapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &support_ant_div); if (!support_ant_div) return res;
@@@ -1141,6 -1669,12 +1141,6 @@@ exit return res; }
-static void power_saving_wk_hdl(struct adapter *padapter, u8 *pbuf, int sz) -{ - rtw_ps_processor(padapter); -} - -#ifdef CONFIG_88EU_P2P u8 p2p_protocol_wk_cmd(struct adapter *padapter, int intCmdType) { struct cmd_obj *ph2c; @@@ -1177,6 -1711,7 +1177,6 @@@ exit
return res; } -#endif /* CONFIG_88EU_P2P */
u8 rtw_ps_cmd(struct adapter *padapter) { @@@ -1210,6 -1745,8 +1210,6 @@@ exit return res; }
-#ifdef CONFIG_88EU_AP_MODE - static void rtw_chk_hi_queue_hdl(struct adapter *padapter) { int cnt = 0; @@@ -1226,7 -1763,7 +1226,7 @@@ /* while ((rtw_read32(padapter, 0x414)&0x00ffff00)!= 0) */ /* while ((rtw_read32(padapter, 0x414)&0x0000ff00)!= 0) */
- rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val); + GetHwReg8188EU(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
while (!val) { msleep(100); @@@ -1236,7 -1773,7 +1236,7 @@@ if (cnt > 10) break;
- rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val); + GetHwReg8188EU(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val); }
if (cnt <= 10) { @@@ -1280,6 -1817,7 +1280,6 @@@ u8 rtw_chk_hi_queue_cmd(struct adapter exit: return res; } -#endif
u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt) { @@@ -1314,12 -1852,29 +1314,12 @@@ exit return res; }
-static s32 c2h_evt_hdl(struct adapter *adapter, struct c2h_evt_hdr *c2h_evt, c2h_id_filter filter) +static void c2h_evt_hdl(struct adapter *adapter, struct c2h_evt_hdr *c2h_evt, c2h_id_filter filter) { - s32 ret = _FAIL; u8 buf[16];
- if (!c2h_evt) { - /* No c2h event in cmd_obj, read c2h event before handling*/ - if (c2h_evt_read(adapter, buf) == _SUCCESS) { - c2h_evt = (struct c2h_evt_hdr *)buf; - - if (filter && !filter(c2h_evt->id)) - goto exit; - - ret = rtw_hal_c2h_handler(adapter, c2h_evt); - } - } else { - if (filter && !filter(c2h_evt->id)) - goto exit; - - ret = rtw_hal_c2h_handler(adapter, c2h_evt); - } -exit: - return ret; + if (!c2h_evt) + c2h_evt_read(adapter, buf); }
static void c2h_wk_callback(struct work_struct *work) @@@ -1327,6 -1882,7 +1327,6 @@@ struct evt_priv *evtpriv = container_of(work, struct evt_priv, c2h_wk); struct adapter *adapter = container_of(evtpriv, struct adapter, evtpriv); struct c2h_evt_hdr *c2h_evt; - c2h_id_filter ccx_id_filter = rtw_hal_c2h_id_filter_ccx(adapter);
evtpriv->c2h_wk_alive = true;
@@@ -1356,8 -1912,16 +1356,8 @@@ continue; }
- if (ccx_id_filter(c2h_evt->id)) { - /* Handle CCX report here */ - rtw_hal_c2h_handler(adapter, c2h_evt); - kfree(c2h_evt); - } else { -#ifdef CONFIG_88EU_P2P - /* Enqueue into cmd_thread for others */ - rtw_c2h_wk_cmd(adapter, (u8 *)c2h_evt); -#endif - } + /* Enqueue into cmd_thread for others */ + rtw_c2h_wk_cmd(adapter, (u8 *)c2h_evt); }
evtpriv->c2h_wk_alive = false; @@@ -1377,7 -1941,7 +1377,7 @@@ u8 rtw_drvextra_cmd_hdl(struct adapter dynamic_chk_wk_hdl(padapter, pdrvextra_cmd->pbuf, pdrvextra_cmd->type_size); break; case POWER_SAVING_CTRL_WK_CID: - power_saving_wk_hdl(padapter, pdrvextra_cmd->pbuf, pdrvextra_cmd->type_size); + rtw_ps_processor(padapter); break; case LPS_CTRL_WK_CID: lps_ctrl_wk_hdl(padapter, (u8)pdrvextra_cmd->type_size); @@@ -1388,6 -1952,7 +1388,6 @@@ case ANT_SELECT_WK_CID: antenna_select_wk_hdl(padapter, pdrvextra_cmd->type_size); break; -#ifdef CONFIG_88EU_P2P case P2P_PS_WK_CID: p2p_ps_wk_hdl(padapter, pdrvextra_cmd->type_size); break; @@@ -1396,9 -1961,12 +1396,9 @@@ /* I used the type_size as the type command */ p2p_protocol_wk_hdl(padapter, pdrvextra_cmd->type_size); break; -#endif -#ifdef CONFIG_88EU_AP_MODE case CHECK_HIQ_WK_CID: rtw_chk_hi_queue_hdl(padapter); break; -#endif /* CONFIG_88EU_AP_MODE */ case C2H_WK_CID: c2h_evt_hdl(padapter, (struct c2h_evt_hdr *)pdrvextra_cmd->pbuf, NULL); break; diff --combined drivers/staging/r8188eu/include/osdep_service.h index efab3a97eb46,afbffb551f9b..f6f5e4581212 --- a/drivers/staging/r8188eu/include/osdep_service.h +++ b/drivers/staging/r8188eu/include/osdep_service.h @@@ -49,13 -49,24 +49,11 @@@ struct __queue spinlock_t lock; };
- #define thread_exit() complete_and_exit(NULL, 0) - static inline struct list_head *get_list_head(struct __queue *queue) { return (&(queue->queue)); }
-static inline int _enter_critical_mutex(struct mutex *pmutex, unsigned long *pirqL) -{ - int ret; - - ret = mutex_lock_interruptible(pmutex); - return ret; -} - -static inline void _exit_critical_mutex(struct mutex *pmutex, unsigned long *pirqL) -{ - mutex_unlock(pmutex); -} - static inline void rtw_list_delete(struct list_head *plist) { list_del_init(plist); @@@ -141,11 -152,11 +139,11 @@@ extern unsigned char RSN_TKIP_CIPHER[4]
void *rtw_malloc2d(int h, int w, int size);
-u32 _rtw_down_sema(struct semaphore *sema); -void _rtw_mutex_init(struct mutex *pmutex); -void _rtw_mutex_free(struct mutex *pmutex); - -void _rtw_init_queue(struct __queue *pqueue); +#define rtw_init_queue(q) \ + do { \ + INIT_LIST_HEAD(&((q)->queue)); \ + spin_lock_init(&((q)->lock)); \ + } while (0)
u32 rtw_systime_to_ms(u32 systime); u32 rtw_ms_to_systime(u32 ms); @@@ -153,17 -164,32 +151,17 @@@ s32 rtw_get_passing_time_ms(u32 start)
void rtw_usleep_os(int us);
-u32 rtw_atoi(u8 *s); - static inline unsigned char _cancel_timer_ex(struct timer_list *ptimer) { return del_timer_sync(ptimer); }
-static __inline void thread_enter(char *name) -{ -#ifdef daemonize - daemonize("%s", name); -#endif - allow_signal(SIGTERM); -} - static inline void flush_signals_thread(void) { if (signal_pending (current)) flush_signals(current); }
-static inline int res_to_status(int res) -{ - return res; -} - #define _RND(sz, r) ((((sz)+((r)-1))/(r))*(r)) #define RND4(x) (((x >> 2) + (((x & 3) == 0) ? 0: 1)) << 2)
@@@ -274,10 -300,12 +272,10 @@@ struct rtw_cbuf u32 write; u32 read; u32 size; - void *bufs[0]; + void *bufs[]; };
-bool rtw_cbuf_full(struct rtw_cbuf *cbuf); bool rtw_cbuf_empty(struct rtw_cbuf *cbuf); -bool rtw_cbuf_push(struct rtw_cbuf *cbuf, void *buf); void *rtw_cbuf_pop(struct rtw_cbuf *cbuf); struct rtw_cbuf *rtw_cbuf_alloc(u32 size); int wifirate2_ratetbl_inx(unsigned char rate); diff --combined drivers/staging/rtl8723bs/core/rtw_cmd.c index 639459d52261,8e69f9c10f5c..bd24d913b464 --- a/drivers/staging/rtl8723bs/core/rtw_cmd.c +++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c @@@ -166,8 -166,7 +166,8 @@@ int rtw_init_cmd_priv(struct cmd_priv * init_completion(&pcmdpriv->cmd_queue_comp); init_completion(&pcmdpriv->terminate_cmdthread_comp);
- _rtw_init_queue(&(pcmdpriv->cmd_queue)); + INIT_LIST_HEAD(&pcmdpriv->cmd_queue.queue); + spin_lock_init(&pcmdpriv->cmd_queue.lock);
/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
@@@ -256,7 -255,7 +256,7 @@@ int _rtw_enqueue_cmd(struct __queue *qu { unsigned long irqL;
- if (obj == NULL) + if (!obj) goto exit;
/* spin_lock_bh(&queue->lock); */ @@@ -278,10 -277,10 +278,10 @@@ struct cmd_obj *_rtw_dequeue_cmd(struc
/* spin_lock_bh(&(queue->lock)); */ spin_lock_irqsave(&queue->lock, irqL); - if (list_empty(&(queue->queue))) + if (list_empty(&queue->queue)) obj = NULL; else { - obj = container_of(get_next(&(queue->queue)), struct cmd_obj, list); + obj = container_of(get_next(&queue->queue), struct cmd_obj, list); list_del_init(&obj->list); }
@@@ -309,19 -308,22 +309,19 @@@ int rtw_cmd_filter(struct cmd_priv *pcm if (cmd_obj->cmdcode == GEN_CMD_CODE(_SetChannelPlan)) bAllow = true;
- if ((pcmdpriv->padapter->hw_init_completed == false && bAllow == false) - || atomic_read(&(pcmdpriv->cmdthd_running)) == false /* com_thread not running */ - ) + if ((!pcmdpriv->padapter->hw_init_completed && !bAllow) || + !atomic_read(&pcmdpriv->cmdthd_running)) /* com_thread not running */ return _FAIL;
return _SUCCESS; }
- - int rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj) { int res = _FAIL; struct adapter *padapter = pcmdpriv->padapter;
- if (cmd_obj == NULL) + if (!cmd_obj) goto exit;
cmd_obj->padapter = padapter; @@@ -365,10 -367,11 +365,10 @@@ void rtw_free_cmd_obj(struct cmd_obj *p kfree(pcmd); }
- void rtw_stop_cmd_thread(struct adapter *adapter) { if (adapter->cmdThread && - atomic_read(&(adapter->cmdpriv.cmdthd_running)) == true && + atomic_read(&adapter->cmdpriv.cmdthd_running) && adapter->cmdpriv.stop_req == 0) { adapter->cmdpriv.stop_req = 1; complete(&adapter->cmdpriv.cmd_queue_comp); @@@ -384,7 -387,7 +384,7 @@@ int rtw_cmd_thread(void *context u8 (*cmd_hdl)(struct adapter *padapter, u8 *pbuf); void (*pcmd_callback)(struct adapter *dev, struct cmd_obj *pcmd); struct adapter *padapter = context; - struct cmd_priv *pcmdpriv = &(padapter->cmdpriv); + struct cmd_priv *pcmdpriv = &padapter->cmdpriv; struct drvextra_cmd_parm *extra_parm = NULL;
thread_enter("RTW_CMD_THREAD"); @@@ -392,7 -395,7 +392,7 @@@ pcmdbuf = pcmdpriv->cmd_buf;
pcmdpriv->stop_req = 0; - atomic_set(&(pcmdpriv->cmdthd_running), true); + atomic_set(&pcmdpriv->cmdthd_running, true); complete(&pcmdpriv->terminate_cmdthread_comp);
while (1) { @@@ -403,7 -406,7 +403,7 @@@ break; }
- if ((padapter->bDriverStopped == true) || (padapter->bSurpriseRemoved == true)) { + if (padapter->bDriverStopped || padapter->bSurpriseRemoved) { netdev_dbg(padapter->pnetdev, "%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n", __func__, padapter->bDriverStopped, @@@ -426,7 -429,7 +426,7 @@@ continue;
_next: - if ((padapter->bDriverStopped == true) || (padapter->bSurpriseRemoved == true)) { + if (padapter->bDriverStopped || padapter->bSurpriseRemoved) { netdev_dbg(padapter->pnetdev, "%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n", __func__, padapter->bDriverStopped, @@@ -468,7 -471,7 +468,7 @@@
post_process:
- if (mutex_lock_interruptible(&(pcmd->padapter->cmdpriv.sctx_mutex)) == 0) { + if (mutex_lock_interruptible(&pcmd->padapter->cmdpriv.sctx_mutex) == 0) { if (pcmd->sctx) { netdev_dbg(padapter->pnetdev, FUNC_ADPT_FMT " pcmd->sctx\n", @@@ -479,13 -482,13 +479,13 @@@ else rtw_sctx_done_err(&pcmd->sctx, RTW_SCTX_DONE_CMD_ERROR); } - mutex_unlock(&(pcmd->padapter->cmdpriv.sctx_mutex)); + mutex_unlock(&pcmd->padapter->cmdpriv.sctx_mutex); }
/* call callback function for post-processed */ if (pcmd->cmdcode < ARRAY_SIZE(rtw_cmd_callback)) { pcmd_callback = rtw_cmd_callback[pcmd->cmdcode].callback; - if (pcmd_callback == NULL) { + if (!pcmd_callback) { rtw_free_cmd_obj(pcmd); } else { /* todo: !!! fill rsp_buf to pcmd->rsp if (pcmd->rsp!= NULL) */ @@@ -494,14 -497,17 +494,14 @@@ } else { rtw_free_cmd_obj(pcmd); } - flush_signals_thread(); - goto _next; - }
/* free all cmd_obj resources */ do { pcmd = rtw_dequeue_cmd(pcmdpriv); - if (pcmd == NULL) { + if (!pcmd) { rtw_unregister_cmd_alive(padapter); break; } @@@ -516,9 -522,9 +516,9 @@@ } while (1);
complete(&pcmdpriv->terminate_cmdthread_comp); - atomic_set(&(pcmdpriv->cmdthd_running), false); + atomic_set(&pcmdpriv->cmdthd_running, false);
- thread_exit(); + return 0; }
/* @@@ -536,15 -542,15 +536,15 @@@ u8 rtw_sitesurvey_cmd(struct adapter * struct cmd_priv *pcmdpriv = &padapter->cmdpriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- if (check_fwstate(pmlmepriv, _FW_LINKED) == true) + if (check_fwstate(pmlmepriv, _FW_LINKED)) rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); - if (ph2c == NULL) + if (!ph2c) return _FAIL;
psurveyPara = rtw_zmalloc(sizeof(struct sitesurvey_parm)); - if (psurveyPara == NULL) { + if (!psurveyPara) { kfree(ph2c); return _FAIL; } @@@ -585,6 -591,7 +585,6 @@@ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
if (res == _SUCCESS) { - pmlmepriv->scan_start_time = jiffies; _set_timer(&pmlmepriv->scan_to_timer, SCANNING_TIMEOUT); } else { @@@ -601,13 -608,13 +601,13 @@@ u8 rtw_setdatarate_cmd(struct adapter * u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); - if (ph2c == NULL) { + if (!ph2c) { res = _FAIL; goto exit; }
pbsetdataratepara = rtw_zmalloc(sizeof(struct setdatarate_parm)); - if (pbsetdataratepara == NULL) { + if (!pbsetdataratepara) { kfree(ph2c); res = _FAIL; goto exit; @@@ -637,7 -644,7 +637,7 @@@ u8 rtw_createbss_cmd(struct adapter *p u8 res = _SUCCESS;
pcmd = rtw_zmalloc(sizeof(struct cmd_obj)); - if (pcmd == NULL) { + if (!pcmd) { res = _FAIL; goto exit; } @@@ -670,7 -677,7 +670,7 @@@ int rtw_startbss_cmd(struct adapter *p } else { /* need enqueue, prepare cmd_obj and enqueue */ pcmd = rtw_zmalloc(sizeof(struct cmd_obj)); - if (pcmd == NULL) { + if (!pcmd) { res = _FAIL; goto exit; } @@@ -717,12 -724,12 +717,12 @@@ u8 rtw_joinbss_cmd(struct adapter *pad struct ht_priv *phtpriv = &pmlmepriv->htpriv; enum ndis_802_11_network_infrastructure ndis_network_mode = pnetwork->network.infrastructure_mode; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; - struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); + struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; u32 tmp_len; u8 *ptmp = NULL;
pcmd = rtw_zmalloc(sizeof(struct cmd_obj)); - if (pcmd == NULL) { + if (!pcmd) { res = _FAIL; goto exit; } @@@ -745,6 -752,7 +745,6 @@@ case Ndis802_11AutoUnknown: case Ndis802_11InfrastructureMax: break; - } }
@@@ -767,7 -775,7 +767,7 @@@ /* If not, we have to copy the connecting AP's MAC address to it so that */ /* the driver just has the bssid information for PMKIDList searching. */
- if (pmlmepriv->assoc_by_bssid == false) + if (!pmlmepriv->assoc_by_bssid) memcpy(&pmlmepriv->assoc_bssid[0], &pnetwork->network.mac_address[0], ETH_ALEN);
psecnetwork->ie_length = rtw_restruct_sec_ie(padapter, &pnetwork->network.ies[0], &psecnetwork->ies[0], pnetwork->network.ie_length); @@@ -833,7 -841,7 +833,7 @@@ u8 rtw_disassoc_cmd(struct adapter *pad
/* prepare cmd parameter */ param = rtw_zmalloc(sizeof(*param)); - if (param == NULL) { + if (!param) { res = _FAIL; goto exit; } @@@ -842,7 -850,7 +842,7 @@@ if (enqueue) { /* need enqueue, prepare cmd_obj and enqueue */ cmdobj = rtw_zmalloc(sizeof(*cmdobj)); - if (cmdobj == NULL) { + if (!cmdobj) { res = _FAIL; kfree(param); goto exit; @@@ -870,7 -878,7 +870,7 @@@ u8 rtw_setopmode_cmd(struct adapter *p
psetop = rtw_zmalloc(sizeof(struct setopmode_parm));
- if (psetop == NULL) { + if (!psetop) { res = _FAIL; goto exit; } @@@ -878,7 -886,7 +878,7 @@@
if (enqueue) { ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); - if (ph2c == NULL) { + if (!ph2c) { kfree(psetop); res = _FAIL; goto exit; @@@ -906,7 -914,7 +906,7 @@@ u8 rtw_setstakey_cmd(struct adapter *pa u8 res = _SUCCESS;
psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm)); - if (psetstakey_para == NULL) { + if (!psetstakey_para) { res = _FAIL; goto exit; } @@@ -914,11 -922,11 +914,11 @@@ memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN);
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) - psetstakey_para->algorithm = (unsigned char) psecuritypriv->dot11PrivacyAlgrthm; + psetstakey_para->algorithm = (unsigned char)psecuritypriv->dot11PrivacyAlgrthm; else GET_ENCRY_ALGO(psecuritypriv, sta, psetstakey_para->algorithm, false);
- if (unicast_key == true) + if (unicast_key) memcpy(&psetstakey_para->key, &sta->dot118021x_UncstKey, 16); else memcpy(&psetstakey_para->key, &psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey, 16); @@@ -928,14 -936,14 +928,14 @@@
if (enqueue) { ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); - if (ph2c == NULL) { + if (!ph2c) { kfree(psetstakey_para); res = _FAIL; goto exit; }
psetstakey_rsp = rtw_zmalloc(sizeof(struct set_stakey_rsp)); - if (psetstakey_rsp == NULL) { + if (!psetstakey_rsp) { kfree(ph2c); kfree(psetstakey_para); res = _FAIL; @@@ -943,7 -951,7 +943,7 @@@ }
init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_); - ph2c->rsp = (u8 *) psetstakey_rsp; + ph2c->rsp = (u8 *)psetstakey_rsp; ph2c->rspsz = sizeof(struct set_stakey_rsp); res = rtw_enqueue_cmd(pcmdpriv, ph2c); } else { @@@ -973,20 -981,20 +973,20 @@@ u8 rtw_clearstakey_cmd(struct adapter * } } else { ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); - if (ph2c == NULL) { + if (!ph2c) { res = _FAIL; goto exit; }
psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm)); - if (psetstakey_para == NULL) { + if (!psetstakey_para) { kfree(ph2c); res = _FAIL; goto exit; }
psetstakey_rsp = rtw_zmalloc(sizeof(struct set_stakey_rsp)); - if (psetstakey_rsp == NULL) { + if (!psetstakey_rsp) { kfree(ph2c); kfree(psetstakey_para); res = _FAIL; @@@ -994,7 -1002,7 +994,7 @@@ }
init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_); - ph2c->rsp = (u8 *) psetstakey_rsp; + ph2c->rsp = (u8 *)psetstakey_rsp; ph2c->rspsz = sizeof(struct set_stakey_rsp);
memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN); @@@ -1002,7 -1010,9 +1002,7 @@@ psetstakey_para->algorithm = _NO_PRIVACY_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c); - } - exit: return res; } @@@ -1016,13 -1026,13 +1016,13 @@@ u8 rtw_addbareq_cmd(struct adapter *pad u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); - if (ph2c == NULL) { + if (!ph2c) { res = _FAIL; goto exit; }
paddbareq_parm = rtw_zmalloc(sizeof(struct addBaReq_parm)); - if (paddbareq_parm == NULL) { + if (!paddbareq_parm) { kfree(ph2c); res = _FAIL; goto exit; @@@ -1048,13 -1058,13 +1048,13 @@@ u8 rtw_reset_securitypriv_cmd(struct ad u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); - if (ph2c == NULL) { + if (!ph2c) { res = _FAIL; goto exit; }
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); - if (pdrvextra_cmd_parm == NULL) { + if (!pdrvextra_cmd_parm) { kfree(ph2c); res = _FAIL; goto exit; @@@ -1070,6 -1080,7 +1070,6 @@@
/* rtw_enqueue_cmd(pcmdpriv, ph2c); */ res = rtw_enqueue_cmd(pcmdpriv, ph2c); - exit: return res; } @@@ -1082,13 -1093,13 +1082,13 @@@ u8 rtw_free_assoc_resources_cmd(struct u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); - if (ph2c == NULL) { + if (!ph2c) { res = _FAIL; goto exit; }
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); - if (pdrvextra_cmd_parm == NULL) { + if (!pdrvextra_cmd_parm) { kfree(ph2c); res = _FAIL; goto exit; @@@ -1101,8 -1112,10 +1101,8 @@@
init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
- /* rtw_enqueue_cmd(pcmdpriv, ph2c); */ res = rtw_enqueue_cmd(pcmdpriv, ph2c); - exit: return res; } @@@ -1116,13 -1129,13 +1116,13 @@@ u8 rtw_dynamic_chk_wk_cmd(struct adapte
/* only primary padapter does this cmd */ ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); - if (ph2c == NULL) { + if (!ph2c) { res = _FAIL; goto exit; }
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); - if (pdrvextra_cmd_parm == NULL) { + if (!pdrvextra_cmd_parm) { kfree(ph2c); res = _FAIL; goto exit; @@@ -1137,6 -1150,7 +1137,6 @@@
/* rtw_enqueue_cmd(pcmdpriv, ph2c); */ res = rtw_enqueue_cmd(pcmdpriv, ph2c); - exit: return res; } @@@ -1150,7 -1164,7 +1150,7 @@@ u8 rtw_set_chplan_cmd(struct adapter *p u8 res = _SUCCESS;
/* check if allow software config */ - if (swconfig && rtw_hal_is_disable_sw_channel_plan(padapter) == true) { + if (swconfig && rtw_hal_is_disable_sw_channel_plan(padapter)) { res = _FAIL; goto exit; } @@@ -1163,7 -1177,7 +1163,7 @@@
/* prepare cmd parameter */ setChannelPlan_param = rtw_zmalloc(sizeof(struct SetChannelPlan_param)); - if (setChannelPlan_param == NULL) { + if (!setChannelPlan_param) { res = _FAIL; goto exit; } @@@ -1172,7 -1186,7 +1172,7 @@@ if (enqueue) { /* need enqueue, prepare cmd_obj and enqueue */ pcmdobj = rtw_zmalloc(sizeof(struct cmd_obj)); - if (pcmdobj == NULL) { + if (!pcmdobj) { kfree(setChannelPlan_param); res = _FAIL; goto exit; @@@ -1229,14 -1243,14 +1229,14 @@@ u8 traffic_status_watchdog(struct adapt u8 bBusyTraffic = false, bTxBusyTraffic = false, bRxBusyTraffic = false; u8 bHigherBusyTraffic = false, bHigherBusyRxTraffic = false, bHigherBusyTxTraffic = false;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); + struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
collect_traffic_statistics(padapter);
/* */ /* Determine if our traffic is busy now */ /* */ - if ((check_fwstate(pmlmepriv, _FW_LINKED) == true) + if ((check_fwstate(pmlmepriv, _FW_LINKED)) /*&& !MgntInitAdapterInProgress(pMgntInfo)*/) { /* if we raise bBusyTraffic in last watchdog, using lower threshold. */ if (pmlmepriv->LinkDetectInfo.bBusyTraffic) @@@ -1268,7 -1282,7 +1268,7 @@@ (pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod > 2)) { bEnterPS = false;
- if (bBusyTraffic == true) { + if (bBusyTraffic) { if (pmlmepriv->LinkDetectInfo.TrafficTransitionCount <= 4) pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 4;
@@@ -1301,7 -1315,7 +1301,7 @@@ struct dvobj_priv *dvobj = adapter_to_dvobj(padapter); int n_assoc_iface = 0;
- if (check_fwstate(&(dvobj->padapters->mlmepriv), WIFI_ASOC_STATE)) + if (check_fwstate(&dvobj->padapters->mlmepriv, WIFI_ASOC_STATE)) n_assoc_iface++;
if (!from_timer && n_assoc_iface == 0) @@@ -1326,18 -1340,21 +1326,18 @@@ static void dynamic_chk_wk_hdl(struct a { struct mlme_priv *pmlmepriv;
- pmlmepriv = &(padapter->mlmepriv); + pmlmepriv = &padapter->mlmepriv;
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) + if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) expire_timeout_chk(padapter);
/* for debug purpose */ _linked_info_dump(padapter); - - /* if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING|_FW_UNDER_SURVEY) ==false) */ { linked_status_chk(padapter); traffic_status_watchdog(padapter, 0); } - rtw_hal_dm_watchdog(padapter);
/* check_hw_pbc(padapter, pdrvextra_cmd->pbuf, pdrvextra_cmd->type); */ @@@ -1357,11 -1374,11 +1357,11 @@@ void lps_ctrl_wk_hdl(struct adapter *pa void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type) { struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter); - struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); + struct mlme_priv *pmlmepriv = &padapter->mlmepriv; u8 mstatus;
- if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) - || (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)) { + if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) || + check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) { return; }
@@@ -1369,7 -1386,7 +1369,7 @@@ case LPS_CTRL_SCAN: hal_btcoex_ScanNotify(padapter, true);
- if (check_fwstate(pmlmepriv, _FW_LINKED) == true) { + if (check_fwstate(pmlmepriv, _FW_LINKED)) { /* connect */ LPS_Leave(padapter, "LPS_CTRL_SCAN"); } @@@ -1419,13 -1436,13 +1419,13 @@@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *
if (enqueue) { ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); - if (ph2c == NULL) { + if (!ph2c) { res = _FAIL; goto exit; }
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); - if (pdrvextra_cmd_parm == NULL) { + if (!pdrvextra_cmd_parm) { kfree(ph2c); res = _FAIL; goto exit; @@@ -1461,13 -1478,13 +1461,13 @@@ u8 rtw_dm_in_lps_wk_cmd(struct adapter
ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); - if (ph2c == NULL) { + if (!ph2c) { res = _FAIL; goto exit; }
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); - if (pdrvextra_cmd_parm == NULL) { + if (!pdrvextra_cmd_parm) { kfree(ph2c); res = _FAIL; goto exit; @@@ -1483,7 -1500,9 +1483,7 @@@ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit: - return res; - }
static void rtw_lps_change_dtim_hdl(struct adapter *padapter, u8 dtim) @@@ -1493,7 -1512,7 +1493,7 @@@ if (dtim <= 0 || dtim > 16) return;
- if (hal_btcoex_IsBtControlLps(padapter) == true) + if (hal_btcoex_IsBtControlLps(padapter)) return;
mutex_lock(&pwrpriv->lock); @@@ -1523,14 -1542,15 +1523,14 @@@ u8 rtw_dm_ra_mask_wk_cmd(struct adapte struct cmd_priv *pcmdpriv = &padapter->cmdpriv; u8 res = _SUCCESS;
- ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); - if (ph2c == NULL) { + if (!ph2c) { res = _FAIL; goto exit; }
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); - if (pdrvextra_cmd_parm == NULL) { + if (!pdrvextra_cmd_parm) { kfree(ph2c); res = _FAIL; goto exit; @@@ -1557,14 -1577,15 +1557,14 @@@ u8 rtw_ps_cmd(struct adapter *padapter struct drvextra_cmd_parm *pdrvextra_cmd_parm; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; u8 res = _SUCCESS; - ppscmd = rtw_zmalloc(sizeof(struct cmd_obj)); - if (ppscmd == NULL) { + if (!ppscmd) { res = _FAIL; goto exit; }
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); - if (pdrvextra_cmd_parm == NULL) { + if (!pdrvextra_cmd_parm) { kfree(ppscmd); res = _FAIL; goto exit; @@@ -1597,7 -1618,7 +1597,7 @@@ static void rtw_chk_hi_queue_hdl(struc
rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &empty);
- while (false == empty && jiffies_to_msecs(jiffies - start) < g_wait_hiq_empty) { + while (!empty && jiffies_to_msecs(jiffies - start) < g_wait_hiq_empty) { msleep(100); rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &empty); } @@@ -1630,13 -1651,13 +1630,13 @@@ u8 rtw_chk_hi_queue_cmd(struct adapter u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); - if (ph2c == NULL) { + if (!ph2c) { res = _FAIL; goto exit; }
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); - if (pdrvextra_cmd_parm == NULL) { + if (!pdrvextra_cmd_parm) { kfree(ph2c); res = _FAIL; goto exit; @@@ -1652,7 -1673,9 +1652,7 @@@ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit: - return res; - }
struct btinfo { @@@ -1722,13 -1745,13 +1722,13 @@@ u8 rtw_c2h_packet_wk_cmd(struct adapte u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); - if (ph2c == NULL) { + if (!ph2c) { res = _FAIL; goto exit; }
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); - if (pdrvextra_cmd_parm == NULL) { + if (!pdrvextra_cmd_parm) { kfree(ph2c); res = _FAIL; goto exit; @@@ -1757,13 -1780,13 +1757,13 @@@ u8 rtw_c2h_wk_cmd(struct adapter *padap u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); - if (ph2c == NULL) { + if (!ph2c) { res = _FAIL; goto exit; }
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); - if (pdrvextra_cmd_parm == NULL) { + if (!pdrvextra_cmd_parm) { kfree(ph2c); res = _FAIL; goto exit; @@@ -1817,7 -1840,7 +1817,7 @@@ static void c2h_wk_callback(struct work continue; }
- if (ccx_id_filter(c2h_evt) == true) { + if (ccx_id_filter(c2h_evt)) { /* Handle CCX report here */ rtw_hal_c2h_handler(adapter, c2h_evt); kfree(c2h_evt); @@@ -1936,9 -1959,9 +1936,9 @@@ void rtw_createbss_cmd_callback(struct struct wlan_network *pwlan = NULL; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)pcmd->parmbuf; - struct wlan_network *tgt_network = &(pmlmepriv->cur_network); + struct wlan_network *tgt_network = &pmlmepriv->cur_network;
- if (pcmd->parmbuf == NULL) + if (!pcmd->parmbuf) goto exit;
if (pcmd->res != H2C_SUCCESS) @@@ -1960,20 -1983,20 +1960,20 @@@ rtw_indicate_connect(padapter); } else { pwlan = rtw_alloc_network(pmlmepriv); - spin_lock_bh(&(pmlmepriv->scanned_queue.lock)); - if (pwlan == NULL) { + spin_lock_bh(&pmlmepriv->scanned_queue.lock); + if (!pwlan) { pwlan = rtw_get_oldest_wlan_network(&pmlmepriv->scanned_queue); - if (pwlan == NULL) { - spin_unlock_bh(&(pmlmepriv->scanned_queue.lock)); + if (!pwlan) { + spin_unlock_bh(&pmlmepriv->scanned_queue.lock); goto createbss_cmd_fail; } pwlan->last_scanned = jiffies; } else { - list_add_tail(&(pwlan->list), &pmlmepriv->scanned_queue.queue); + list_add_tail(&pwlan->list, &pmlmepriv->scanned_queue.queue); }
pnetwork->length = get_wlan_bssid_ex_sz(pnetwork); - memcpy(&(pwlan->network), pnetwork, pnetwork->length); + memcpy(&pwlan->network, pnetwork, pnetwork->length); /* pwlan->fixed = true; */
/* list_add_tail(&(pwlan->list), &pmlmepriv->scanned_queue.queue); */ @@@ -1986,7 -2009,7 +1986,7 @@@
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
- spin_unlock_bh(&(pmlmepriv->scanned_queue.lock)); + spin_unlock_bh(&pmlmepriv->scanned_queue.lock); /* we will set _FW_LINKED when there is one more sat to join us (rtw_stassoc_event_callback) */
} @@@ -1998,10 -2021,13 +1998,10 @@@ exit rtw_free_cmd_obj(pcmd); }
- - void rtw_setstaKey_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd) { - struct sta_priv *pstapriv = &padapter->stapriv; - struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *) (pcmd->rsp); + struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *)(pcmd->rsp); struct sta_info *psta = rtw_get_stainfo(pstapriv, psetstakey_rsp->addr);
if (!psta) @@@ -2016,7 -2042,7 +2016,7 @@@ void rtw_setassocsta_cmdrsp_callback(st struct sta_priv *pstapriv = &padapter->stapriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct set_assocsta_parm *passocsta_parm = (struct set_assocsta_parm *)(pcmd->parmbuf); - struct set_assocsta_rsp *passocsta_rsp = (struct set_assocsta_rsp *) (pcmd->rsp); + struct set_assocsta_rsp *passocsta_rsp = (struct set_assocsta_rsp *)(pcmd->rsp); struct sta_info *psta = rtw_get_stainfo(pstapriv, passocsta_parm->addr);
if (!psta) @@@ -2027,7 -2053,7 +2027,7 @@@
spin_lock_bh(&pmlmepriv->lock);
- if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) && (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true)) + if (check_fwstate(pmlmepriv, WIFI_MP_STATE) && check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
set_fwstate(pmlmepriv, _FW_LINKED); diff --combined drivers/staging/rtl8723bs/core/rtw_xmit.c index 46054d6a1fb5,0c357bc2478c..13b8bd5ffabc --- a/drivers/staging/rtl8723bs/core/rtw_xmit.c +++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c @@@ -13,8 -13,7 +13,8 @@@ static u8 RFC1042_OUI[P80211_OUI_LEN] static void _init_txservq(struct tx_servq *ptxservq) { INIT_LIST_HEAD(&ptxservq->tx_pending); - _rtw_init_queue(&ptxservq->sta_pending); + INIT_LIST_HEAD(&ptxservq->sta_pending.queue); + spin_lock_init(&ptxservq->sta_pending.lock); ptxservq->qcnt = 0; }
@@@ -50,19 -49,13 +50,19 @@@ s32 _rtw_init_xmit_priv(struct xmit_pri
pxmitpriv->adapter = padapter;
- _rtw_init_queue(&pxmitpriv->be_pending); - _rtw_init_queue(&pxmitpriv->bk_pending); - _rtw_init_queue(&pxmitpriv->vi_pending); - _rtw_init_queue(&pxmitpriv->vo_pending); - _rtw_init_queue(&pxmitpriv->bm_pending); + INIT_LIST_HEAD(&pxmitpriv->be_pending.queue); + spin_lock_init(&pxmitpriv->be_pending.lock); + INIT_LIST_HEAD(&pxmitpriv->bk_pending.queue); + spin_lock_init(&pxmitpriv->bk_pending.lock); + INIT_LIST_HEAD(&pxmitpriv->vi_pending.queue); + spin_lock_init(&pxmitpriv->vi_pending.lock); + INIT_LIST_HEAD(&pxmitpriv->vo_pending.queue); + spin_lock_init(&pxmitpriv->vo_pending.lock); + INIT_LIST_HEAD(&pxmitpriv->bm_pending.queue); + spin_lock_init(&pxmitpriv->bm_pending.lock);
- _rtw_init_queue(&pxmitpriv->free_xmit_queue); + INIT_LIST_HEAD(&pxmitpriv->free_xmit_queue.queue); + spin_lock_init(&pxmitpriv->free_xmit_queue.lock);
/* * Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME, @@@ -103,10 -96,8 +103,10 @@@ pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
/* init xmit_buf */ - _rtw_init_queue(&pxmitpriv->free_xmitbuf_queue); - _rtw_init_queue(&pxmitpriv->pending_xmitbuf_queue); + INIT_LIST_HEAD(&pxmitpriv->free_xmitbuf_queue.queue); + spin_lock_init(&pxmitpriv->free_xmitbuf_queue.lock); + INIT_LIST_HEAD(&pxmitpriv->pending_xmitbuf_queue.queue); + spin_lock_init(&pxmitpriv->pending_xmitbuf_queue.lock);
pxmitpriv->pallocated_xmitbuf = vzalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
@@@ -154,8 -145,7 +154,8 @@@ pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF;
/* init xframe_ext queue, the same count as extbuf */ - _rtw_init_queue(&pxmitpriv->free_xframe_ext_queue); + INIT_LIST_HEAD(&pxmitpriv->free_xframe_ext_queue.queue); + spin_lock_init(&pxmitpriv->free_xframe_ext_queue.lock);
pxmitpriv->xframe_ext_alloc_addr = vzalloc(NR_XMIT_EXTBUFF * sizeof(struct xmit_frame) + 4);
@@@ -188,8 -178,7 +188,8 @@@ pxmitpriv->free_xframe_ext_cnt = NR_XMIT_EXTBUFF;
/* Init xmit extension buff */ - _rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue); + INIT_LIST_HEAD(&pxmitpriv->free_xmit_extbuf_queue.queue); + spin_lock_init(&pxmitpriv->free_xmit_extbuf_queue.lock);
pxmitpriv->pallocated_xmit_extbuf = vzalloc(NR_XMIT_EXTBUFF * sizeof(struct xmit_buf) + 4);
@@@ -1734,12 -1723,15 +1734,12 @@@ void rtw_free_xmitframe_queue(struct xm struct list_head *plist, *phead, *tmp; struct xmit_frame *pxmitframe;
- spin_lock_bh(&pframequeue->lock); - phead = get_list_head(pframequeue); list_for_each_safe(plist, tmp, phead) { pxmitframe = list_entry(plist, struct xmit_frame, list);
rtw_free_xmitframe(pxmitpriv, pxmitframe); } - spin_unlock_bh(&pframequeue->lock); }
s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe) @@@ -1794,7 -1786,6 +1794,7 @@@ s32 rtw_xmit_classifier(struct adapter struct sta_info *psta; struct tx_servq *ptxservq; struct pkt_attrib *pattrib = &pxmitframe->attrib; + struct xmit_priv *xmit_priv = &padapter->xmitpriv; struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits; signed int res = _SUCCESS;
@@@ -1812,14 -1803,12 +1812,14 @@@
ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
+ spin_lock_bh(&xmit_priv->lock); if (list_empty(&ptxservq->tx_pending)) list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue));
list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending)); ptxservq->qcnt++; phwxmits[ac_index].accnt++; + spin_unlock_bh(&xmit_priv->lock);
exit:
@@@ -2202,10 -2191,11 +2202,10 @@@ void wakeup_sta_to_xmit(struct adapter struct list_head *xmitframe_plist, *xmitframe_phead, *tmp; struct xmit_frame *pxmitframe = NULL; struct sta_priv *pstapriv = &padapter->stapriv; - struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
psta_bmc = rtw_get_bcmc_stainfo(padapter);
- spin_lock_bh(&pxmitpriv->lock); + spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q); list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) { @@@ -2306,7 -2296,7 +2306,7 @@@
_exit:
- spin_unlock_bh(&pxmitpriv->lock); + spin_unlock_bh(&psta->sleep_q.lock);
if (update_mask) update_beacon(padapter, WLAN_EID_TIM, NULL, true); @@@ -2318,8 -2308,9 +2318,8 @@@ void xmit_delivery_enabled_frames(struc struct list_head *xmitframe_plist, *xmitframe_phead, *tmp; struct xmit_frame *pxmitframe = NULL; struct sta_priv *pstapriv = &padapter->stapriv; - struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- spin_lock_bh(&pxmitpriv->lock); + spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q); list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) { @@@ -2372,7 -2363,7 +2372,7 @@@ } }
- spin_unlock_bh(&pxmitpriv->lock); + spin_unlock_bh(&psta->sleep_q.lock); }
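Both sleep-queue walkers above switch from the driver-wide pxmitpriv->lock to the per-station sleep_q lock, narrowing the critical section to the one station being serviced. A rough sketch of that per-object locking shape, with made-up type and field names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct station {
        struct list_head sleep_q;
        spinlock_t sleep_q_lock;        /* protects only this station's queue */
};

static void flush_station(struct station *sta, struct list_head *out)
{
        /* Lock only the station being serviced, not every transmit queue. */
        spin_lock_bh(&sta->sleep_q_lock);
        list_splice_init(&sta->sleep_q, out);
        spin_unlock_bh(&sta->sleep_q_lock);
}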
void enqueue_pending_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf) @@@ -2500,7 -2491,7 +2500,7 @@@ int rtw_xmit_thread(void *context
complete(&padapter->xmitpriv.terminate_xmitthread_comp);
- thread_exit(); + return 0; }
void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms) diff --combined drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c index 5f5c4719b586,2b9a41b12d1f..7fe3df863fe1 --- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c +++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c @@@ -435,7 -435,7 +435,7 @@@ int rtl8723bs_xmit_thread(void *context
complete(&pxmitpriv->SdioXmitTerminate);
- thread_exit(); + return 0; }
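Both thread functions above now end with return 0 instead of the old thread_exit() wrapper: a kthread function is expected simply to return and let the kthread core handle the exit (the kernel/kthread.c comment later in this diff says the same). A minimal sketch of the expected shape, with illustrative names:

#include <linux/kthread.h>
#include <linux/delay.h>

static int example_xmit_thread(void *data)
{
        /* Loop until kthread_stop() is called on this thread. */
        while (!kthread_should_stop()) {
                /* ... do one unit of work ... */
                msleep(10);
        }

        /* Just return; do not call do_exit() or a thread_exit() wrapper. */
        return 0;
}

Its creator would pair kthread_run(example_xmit_thread, NULL, "example") with a later kthread_stop() on the returned task.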
s32 rtl8723bs_mgnt_xmit( @@@ -507,7 -507,9 +507,7 @@@ s32 rtl8723bs_hal_xmit rtw_issue_addbareq_cmd(padapter, pxmitframe); }
- spin_lock_bh(&pxmitpriv->lock); err = rtw_xmitframe_enqueue(padapter, pxmitframe); - spin_unlock_bh(&pxmitpriv->lock); if (err != _SUCCESS) { rtw_free_xmitframe(pxmitpriv, pxmitframe);
diff --combined fs/binfmt_elf.c index a813b70f594e,796e5327ee7d..fa582748be41 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@@ -630,7 -630,7 +630,7 @@@ static unsigned long load_elf_interp(st
vaddr = eppnt->p_vaddr; if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) - elf_type |= MAP_FIXED_NOREPLACE; + elf_type |= MAP_FIXED; else if (no_base && interp_elf_ex->e_type == ET_DYN) load_addr = -vaddr;
@@@ -1834,7 -1834,7 +1834,7 @@@ static int fill_note_info(struct elfhd /* * Allocate a structure for each thread. */ - for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) { + for (ct = &dump_task->signal->core_state->dumper; ct; ct = ct->next) { t = kzalloc(offsetof(struct elf_thread_core_info, notes[info->thread_notes]), GFP_KERNEL); @@@ -2024,7 -2024,7 +2024,7 @@@ static int fill_note_info(struct elfhd if (!elf_note_info_init(info)) return 0;
- for (ct = current->mm->core_state->dumper.next; + for (ct = current->signal->core_state->dumper.next; ct; ct = ct->next) { ets = kzalloc(sizeof(*ets), GFP_KERNEL); if (!ets) diff --combined fs/proc/array.c index 77cf4187adec,520c51be1e57..ff869a66b34e --- a/fs/proc/array.c +++ b/fs/proc/array.c @@@ -408,9 -408,9 +408,9 @@@ static void task_cpus_allowed(struct se cpumask_pr_args(&task->cpus_mask)); }
- static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm) + static inline void task_core_dumping(struct seq_file *m, struct task_struct *task) { - seq_put_decimal_ull(m, "CoreDumping:\t", !!mm->core_state); + seq_put_decimal_ull(m, "CoreDumping:\t", !!task->signal->core_state); seq_putc(m, '\n'); }
@@@ -436,7 -436,7 +436,7 @@@ int proc_pid_status(struct seq_file *m
if (mm) { task_mem(m, mm); - task_core_dumping(m, mm); + task_core_dumping(m, task); task_thp_status(m, mm); mmput(mm); } @@@ -541,7 -541,7 +541,7 @@@ static int do_task_stat(struct seq_fil }
if (permitted && (!whole || num_threads < 2)) - wchan = get_wchan(task); + wchan = !task_is_running(task); if (!whole) { min_flt = task->min_flt; maj_flt = task->maj_flt; @@@ -606,7 -606,10 +606,7 @@@ * * This works with older implementations of procps as well. */ - if (wchan) - seq_puts(m, " 1"); - else - seq_puts(m, " 0"); + seq_put_decimal_ull(m, " ", wchan);
seq_put_decimal_ull(m, " ", 0); seq_put_decimal_ull(m, " ", 0); diff --combined include/linux/mm_types.h index 8f3131477ec6,1039f6ae922c..f7326c8704bb --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@@ -12,7 -12,6 +12,7 @@@ #include <linux/completion.h> #include <linux/cpumask.h> #include <linux/uprobes.h> +#include <linux/rcupdate.h> #include <linux/page-flags-layout.h> #include <linux/workqueue.h> #include <linux/seqlock.h> @@@ -105,7 -104,18 +105,7 @@@ struct page struct page_pool *pp; unsigned long _pp_mapping_pad; unsigned long dma_addr; - union { - /** - * dma_addr_upper: might require a 64-bit - * value on 32-bit architectures. - */ - unsigned long dma_addr_upper; - /** - * For frag page support, not supported in - * 32-bit architectures with 64-bit DMA. - */ - atomic_long_t pp_frag_count; - }; + atomic_long_t pp_frag_count; }; struct { /* slab, slob and slub */ union { @@@ -229,72 -239,6 +229,72 @@@ #endif } _struct_page_alignment;
+/** + * struct folio - Represents a contiguous set of bytes. + * @flags: Identical to the page flags. + * @lru: Least Recently Used list; tracks how recently this folio was used. + * @mapping: The file this page belongs to, or refers to the anon_vma for + * anonymous memory. + * @index: Offset within the file, in units of pages. For anonymous memory, + * this is the index from the beginning of the mmap. + * @private: Filesystem per-folio data (see folio_attach_private()). + * Used for swp_entry_t if folio_test_swapcache(). + * @_mapcount: Do not access this member directly. Use folio_mapcount() to + * find out how many times this folio is mapped by userspace. + * @_refcount: Do not access this member directly. Use folio_ref_count() + * to find how many references there are to this folio. + * @memcg_data: Memory Control Group data. + * + * A folio is a physically, virtually and logically contiguous set + * of bytes. It is a power-of-two in size, and it is aligned to that + * same power-of-two. It is at least as large as %PAGE_SIZE. If it is + * in the page cache, it is at a file offset which is a multiple of that + * power-of-two. It may be mapped into userspace at an address which is + * at an arbitrary page offset, but its kernel virtual address is aligned + * to its size. + */ +struct folio { + /* private: don't document the anon union */ + union { + struct { + /* public: */ + unsigned long flags; + struct list_head lru; + struct address_space *mapping; + pgoff_t index; + void *private; + atomic_t _mapcount; + atomic_t _refcount; +#ifdef CONFIG_MEMCG + unsigned long memcg_data; +#endif + /* private: the union with struct page is transitional */ + }; + struct page page; + }; +}; + +static_assert(sizeof(struct page) == sizeof(struct folio)); +#define FOLIO_MATCH(pg, fl) \ + static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl)) +FOLIO_MATCH(flags, flags); +FOLIO_MATCH(lru, lru); +FOLIO_MATCH(compound_head, lru); +FOLIO_MATCH(index, index); +FOLIO_MATCH(private, private); +FOLIO_MATCH(_mapcount, _mapcount); +FOLIO_MATCH(_refcount, _refcount); +#ifdef CONFIG_MEMCG +FOLIO_MATCH(memcg_data, memcg_data); +#endif +#undef FOLIO_MATCH + +static inline atomic_t *folio_mapcount_ptr(struct folio *folio) +{ + struct page *tail = &folio->page + 1; + return &tail->compound_mapcount; +} + static inline atomic_t *compound_mapcount_ptr(struct page *page) { return &page[1].compound_mapcount; @@@ -313,12 -257,6 +313,12 @@@ static inline atomic_t *compound_pincou #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+/* + * page_private can be used on tail pages. However, PagePrivate is only + * checked by the VM on the head page. So page_private on the tail pages + * should be used for data that's ancillary to the head page (eg attaching + * buffer heads to tail pages after attaching buffer heads to the head page) + */ #define page_private(page) ((page)->private)
static inline void set_page_private(struct page *page, unsigned long private) @@@ -326,11 -264,6 +326,11 @@@ page->private = private; }
+static inline void *folio_get_private(struct folio *folio) +{ + return folio->private; +} + struct page_frag_cache { void * va; #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) @@@ -454,17 -387,6 +454,6 @@@ struct vm_area_struct struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } __randomize_layout;
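The FOLIO_MATCH() asserts above are a compile-time layout check: struct folio may only be overlaid on struct page if the shared members sit at the same offsets. The same technique works in plain C11, as in this standalone illustration (not kernel code; the wire_v1/wire_v2 structs are invented):

#include <assert.h>
#include <stddef.h>

struct wire_v1 {
        unsigned long flags;
        void *payload;
};

struct wire_v2 {
        unsigned long flags;
        void *payload;
        /* new trailing members may be added without breaking the overlay */
};

/* Fails to compile if a shared field ever moves. */
#define WIRE_MATCH(f) \
        static_assert(offsetof(struct wire_v1, f) == offsetof(struct wire_v2, f), \
                      "layout mismatch: " #f)
WIRE_MATCH(flags);
WIRE_MATCH(payload);
#undef WIRE_MATCH

int main(void) { return 0; }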
- struct core_thread { - struct task_struct *task; - struct core_thread *next; - }; - - struct core_state { - atomic_t nr_threads; - struct core_thread dumper; - struct completion startup; - }; - struct kioctx_table; struct mm_struct { struct { @@@ -585,8 -507,6 +574,6 @@@
unsigned long flags; /* Must use atomic bitops to access */
- struct core_state *core_state; /* coredumping support */ - #ifdef CONFIG_AIO spinlock_t ioctx_lock; struct kioctx_table __rcu *ioctx_table; @@@ -639,9 -559,6 +626,9 @@@ bool tlb_flush_batched; #endif struct uprobes_state uprobes_state; +#ifdef CONFIG_PREEMPT_RT + struct rcu_head delayed_drop; +#endif #ifdef CONFIG_HUGETLB_PAGE atomic_long_t hugetlb_usage; #endif diff --combined include/linux/sched.h index f44e14c43b74,f3741f23935e..86e1fd460ae5 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@@ -503,8 -503,6 +503,8 @@@ struct sched_statistics
u64 block_start; u64 block_max; + s64 sum_block_runtime; + u64 exec_max; u64 slice_max;
@@@ -524,7 -522,7 +524,7 @@@ u64 nr_wakeups_passive; u64 nr_wakeups_idle; #endif -}; +} ____cacheline_aligned;
struct sched_entity { /* For load-balancing: */ @@@ -540,6 -538,8 +540,6 @@@
u64 nr_migrations;
- struct sched_statistics statistics; - #ifdef CONFIG_FAIR_GROUP_SCHED int depth; struct sched_entity *parent; @@@ -775,10 -775,10 +775,10 @@@ struct task_struct int normal_prio; unsigned int rt_priority;
- const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct sched_dl_entity dl; + const struct sched_class *sched_class;
#ifdef CONFIG_SCHED_CORE struct rb_node core_node; @@@ -803,8 -803,6 +803,8 @@@ struct uclamp_se uclamp[UCLAMP_CNT]; #endif
+ struct sched_statistics stats; + #ifdef CONFIG_PREEMPT_NOTIFIERS /* List of struct preempt_notifier: */ struct hlist_head preempt_notifiers; @@@ -1162,8 -1160,10 +1162,8 @@@ /* Stacked block device info: */ struct bio_list *bio_list;
-#ifdef CONFIG_BLOCK /* Stack plugging: */ struct blk_plug *plug; -#endif
/* VM state: */ struct reclaim_state *reclaim_state; @@@ -1471,7 -1471,6 +1471,7 @@@ mce_whole_page : 1, __mce_reserved : 62; struct callback_head mce_kill_me; + int mce_count; #endif
#ifdef CONFIG_KRETPROBES @@@ -1665,6 -1664,7 +1665,7 @@@ extern struct pid *cad_pid #define PF_VCPU 0x00000001 /* I'm a virtual CPU */ #define PF_IDLE 0x00000002 /* I am an IDLE thread */ #define PF_EXITING 0x00000004 /* Getting shut down */ + #define PF_POSTCOREDUMP 0x00000008 /* Coredumps should ignore this task */ #define PF_IO_WORKER 0x00000010 /* Task is an IO worker */ #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ @@@ -1720,7 -1720,7 +1721,7 @@@ #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) #define used_math() tsk_used_math(current)
-static inline bool is_percpu_thread(void) +static __always_inline bool is_percpu_thread(void) { #ifdef CONFIG_SMP return (current->flags & PF_NO_SETAFFINITY) && @@@ -2039,7 -2039,7 +2040,7 @@@ static inline int _cond_resched(void) #endif /* !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) */
#define cond_resched() ({ \ - ___might_sleep(__FILE__, __LINE__, 0); \ + __might_resched(__FILE__, __LINE__, 0); \ _cond_resched(); \ })
@@@ -2047,38 -2047,19 +2048,38 @@@ extern int __cond_resched_lock(spinlock extern int __cond_resched_rwlock_read(rwlock_t *lock); extern int __cond_resched_rwlock_write(rwlock_t *lock);
-#define cond_resched_lock(lock) ({ \ - ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ - __cond_resched_lock(lock); \ +#define MIGHT_RESCHED_RCU_SHIFT 8 +#define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1) + +#ifndef CONFIG_PREEMPT_RT +/* + * Non RT kernels have an elevated preempt count due to the held lock, + * but are not allowed to be inside a RCU read side critical section + */ +# define PREEMPT_LOCK_RESCHED_OFFSETS PREEMPT_LOCK_OFFSET +#else +/* + * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in + * cond_resched*lock() has to take that into account because it checks for + * preempt_count() and rcu_preempt_depth(). + */ +# define PREEMPT_LOCK_RESCHED_OFFSETS \ + (PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT)) +#endif + +#define cond_resched_lock(lock) ({ \ + __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \ + __cond_resched_lock(lock); \ })
-#define cond_resched_rwlock_read(lock) ({ \ - __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ - __cond_resched_rwlock_read(lock); \ +#define cond_resched_rwlock_read(lock) ({ \ + __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \ + __cond_resched_rwlock_read(lock); \ })
-#define cond_resched_rwlock_write(lock) ({ \ - __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ - __cond_resched_rwlock_write(lock); \ +#define cond_resched_rwlock_write(lock) ({ \ + __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \ + __cond_resched_rwlock_write(lock); \ })
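The new MIGHT_RESCHED_RCU_SHIFT encoding above packs two expectations into one offsets argument: the preempt count contributed by the held lock in the low eight bits and, on PREEMPT_RT, an implied RCU read-side depth above them. A small standalone illustration of that packing (PREEMPT_LOCK_OFFSET is assumed to be 1 here; the shift and mask are the ones defined above):

#include <stdio.h>

#define MIGHT_RESCHED_RCU_SHIFT         8
#define MIGHT_RESCHED_PREEMPT_MASK      ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)

int main(void)
{
        /* Assume the held lock contributes 1 to preempt_count. */
        unsigned int lock_offset = 1;

        /* On PREEMPT_RT, spin_lock() implies rcu_read_lock(), so expect RCU depth 1 too. */
        unsigned int offsets = lock_offset + (1U << MIGHT_RESCHED_RCU_SHIFT);

        printf("expected preempt count: %u\n", offsets & MIGHT_RESCHED_PREEMPT_MASK);
        printf("expected rcu depth:     %u\n", offsets >> MIGHT_RESCHED_RCU_SHIFT);
        return 0;
}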
static inline void cond_resched_rcu(void) @@@ -2156,7 -2137,6 +2157,7 @@@ static inline void set_task_cpu(struct #endif /* CONFIG_SMP */
extern bool sched_task_on_rq(struct task_struct *p); +extern unsigned long get_wchan(struct task_struct *p);
/* * In order to reduce various lock holder preemption latencies provide an diff --combined kernel/exit.c index 50f1692c732d,2b355e926c13..f702a6a63686 --- a/kernel/exit.c +++ b/kernel/exit.c @@@ -48,6 -48,7 +48,6 @@@ #include <linux/pipe_fs_i.h> #include <linux/audit.h> /* for audit_free() */ #include <linux/resource.h> -#include <linux/blkdev.h> #include <linux/task_io_accounting_ops.h> #include <linux/tracehook.h> #include <linux/fs_struct.h> @@@ -63,7 -64,6 +63,7 @@@ #include <linux/rcuwait.h> #include <linux/compat.h> #include <linux/io_uring.h> +#include <linux/kprobes.h>
#include <linux/uaccess.h> #include <asm/unistd.h> @@@ -168,7 -168,6 +168,7 @@@ static void delayed_put_task_struct(str { struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
+ kprobe_flush_task(tsk); perf_event_delayed_put(tsk); trace_sched_process_free(tsk); put_task_struct(tsk); @@@ -340,6 -339,46 +340,46 @@@ kill_orphaned_pgrp(struct task_struct * } }
+ static void coredump_task_exit(struct task_struct *tsk)
+ {
+         struct core_state *core_state;
+ 
+         /*
+          * Serialize with any possible pending coredump.
+          * We must hold siglock around checking core_state
+          * and setting PF_POSTCOREDUMP. The core-inducing thread
+          * will increment ->nr_threads for each thread in the
+          * group without PF_POSTCOREDUMP set.
+          */
+         spin_lock_irq(&tsk->sighand->siglock);
+         tsk->flags |= PF_POSTCOREDUMP;
+         core_state = tsk->signal->core_state;
+         spin_unlock_irq(&tsk->sighand->siglock);
+         if (core_state) {
+                 struct core_thread self;
+ 
+                 self.task = current;
+                 if (self.task->flags & PF_SIGNALED)
+                         self.next = xchg(&core_state->dumper.next, &self);
+                 else
+                         self.task = NULL;
+                 /*
+                  * Implies mb(), the result of xchg() must be visible
+                  * to core_state->dumper.
+                  */
+                 if (atomic_dec_and_test(&core_state->nr_threads))
+                         complete(&core_state->startup);
+ 
+                 for (;;) {
+                         set_current_state(TASK_UNINTERRUPTIBLE);
+                         if (!self.task) /* see coredump_finish() */
+                                 break;
+                         freezable_schedule();
+                 }
+                 __set_current_state(TASK_RUNNING);
+         }
+ }
+ 
  #ifdef CONFIG_MEMCG
  /*
   * A task is exiting. If it owned this mm, find a new owner for the mm.
@@@ -435,47 -474,12 +475,12 @@@ assign_new_owner
  static void exit_mm(void)
  {
          struct mm_struct *mm = current->mm;
-         struct core_state *core_state;
exit_mm_release(current, mm); if (!mm) return; sync_mm_rss(mm); - /* - * Serialize with any possible pending coredump. - * We must hold mmap_lock around checking core_state - * and clearing tsk->mm. The core-inducing thread - * will increment ->nr_threads for each thread in the - * group with ->mm != NULL. - */ mmap_read_lock(mm); - core_state = mm->core_state; - if (core_state) { - struct core_thread self; - - mmap_read_unlock(mm); - - self.task = current; - if (self.task->flags & PF_SIGNALED) - self.next = xchg(&core_state->dumper.next, &self); - else - self.task = NULL; - /* - * Implies mb(), the result of xchg() must be visible - * to core_state->dumper. - */ - if (atomic_dec_and_test(&core_state->nr_threads)) - complete(&core_state->startup); - - for (;;) { - set_current_state(TASK_UNINTERRUPTIBLE); - if (!self.task) /* see coredump_finish() */ - break; - freezable_schedule(); - } - __set_current_state(TASK_RUNNING); - mmap_read_lock(mm); - } mmgrab(mm); BUG_ON(mm != current->active_mm); /* more a memory barrier than a real lock */ @@@ -763,6 -767,7 +768,7 @@@ void __noreturn do_exit(long code profile_task_exit(tsk); kcov_task_exit(tsk);
+ coredump_task_exit(tsk); ptrace_event(PTRACE_EVENT_EXIT, code);
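The self-registration in coredump_task_exit() above, self.next = xchg(&core_state->dumper.next, &self), is a lock-free push onto a singly linked list: one atomic exchange links the node and returns the previous head. A standalone userspace sketch of the same idea with C11 atomics (illustrative only; note the ordering caveat in the comment):

#include <stdatomic.h>
#include <stdio.h>

struct node {
        struct node *next;
        int id;
};

/* The list head is a single atomic pointer; NULL means empty. */
static _Atomic(struct node *) head;

static void push(struct node *n)
{
        /*
         * Atomically make n the new head and remember the previous head.
         * With an exchange, n becomes visible before n->next is written;
         * the coredump code tolerates this because the dumper only walks
         * the list after every thread has checked in via the completion.
         */
        n->next = atomic_exchange(&head, n);
}

int main(void)
{
        struct node a = { .id = 1 }, b = { .id = 2 };

        push(&a);
        push(&b);

        for (struct node *n = atomic_load(&head); n; n = n->next)
                printf("node %d\n", n->id);
        return 0;
}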
validate_creds_for_do_exit(tsk); diff --combined kernel/fork.c index 8e9feeef555e,c8adb76982f7..3f112b11a9ad --- a/kernel/fork.c +++ b/kernel/fork.c @@@ -76,6 -76,7 +76,6 @@@ #include <linux/taskstats_kern.h> #include <linux/random.h> #include <linux/tty.h> -#include <linux/blkdev.h> #include <linux/fs_struct.h> #include <linux/magic.h> #include <linux/perf_event.h> @@@ -1043,7 -1044,6 +1043,6 @@@ static struct mm_struct *mm_init(struc seqcount_init(&mm->write_protect_seq); mmap_init_lock(mm); INIT_LIST_HEAD(&mm->mmlist); - mm->core_state = NULL; mm_pgtables_bytes_init(mm); mm->map_count = 0; mm->locked_vm = 0; @@@ -1391,8 -1391,7 +1390,7 @@@ static void mm_release(struct task_stru * purposes. */ if (tsk->clear_child_tid) { - if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) && - atomic_read(&mm->mm_users) > 1) { + if (atomic_read(&mm->mm_users) > 1) { /* * We don't check the error code - if userspace has * not set up a proper pointer then tough luck. @@@ -2404,7 -2403,7 +2402,7 @@@ static __latent_entropy struct task_str write_unlock_irq(&tasklist_lock);
proc_fork_connector(p); - sched_post_fork(p); + sched_post_fork(p, args); cgroup_post_fork(p, args); perf_event_fork(p);
diff --combined kernel/kthread.c index 4a4d7092a2d8,33e17beaa682..7113003fab63 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@@ -270,7 -270,6 +270,7 @@@ EXPORT_SYMBOL_GPL(kthread_parkme)
static int kthread(void *_create) { + static const struct sched_param param = { .sched_priority = 0 }; /* Copy data: it's on kthread's stack */ struct kthread_create_info *create = _create; int (*threadfn)(void *data) = create->threadfn; @@@ -301,13 -300,6 +301,13 @@@ init_completion(&self->parked); current->vfork_done = &self->exited;
+ /* + * The new thread inherited kthreadd's priority and CPU mask. Reset + * back to default in case they have been changed. + */ + sched_setscheduler_nocheck(current, SCHED_NORMAL, ¶m); + set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_KTHREAD)); + /* OK, tell user we're spawned, wait for stop or wakeup */ __set_current_state(TASK_UNINTERRUPTIBLE); create->result = current; @@@ -405,6 -397,7 +405,6 @@@ struct task_struct *__kthread_create_on } task = create->result; if (!IS_ERR(task)) { - static const struct sched_param param = { .sched_priority = 0 }; char name[TASK_COMM_LEN];
/* @@@ -413,6 -406,13 +413,6 @@@ */ vsnprintf(name, sizeof(name), namefmt, args); set_task_comm(task, name); - /* - * root may have changed our (kthreadd's) priority or CPU mask. - * The kernel thread should not inherit these properties. - */ - sched_setscheduler_nocheck(task, SCHED_NORMAL, ¶m); - set_cpus_allowed_ptr(task, - housekeeping_cpumask(HK_FLAG_KTHREAD)); } kfree(create); return task; @@@ -433,7 -433,7 +433,7 @@@ * If thread is going to be bound on a particular cpu, give its node * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE. * When woken, the thread will run @threadfn() with @data as its - * argument. @threadfn() can either call do_exit() directly if it is a + * argument. @threadfn() can either return directly if it is a * standalone thread for which no one will call kthread_stop(), or * return when 'kthread_should_stop()' is true (which means * kthread_stop() has been called). The return value should be zero diff --combined kernel/reboot.c index efb40d095d1e,d6e0f9fb7f04..6bcc5d6a6572 --- a/kernel/reboot.c +++ b/kernel/reboot.c @@@ -33,7 -33,6 +33,7 @@@ EXPORT_SYMBOL(cad_pid) #define DEFAULT_REBOOT_MODE #endif enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE; +EXPORT_SYMBOL_GPL(reboot_mode); enum reboot_mode panic_reboot_mode = REBOOT_UNDEFINED;
/* @@@ -360,7 -359,6 +360,6 @@@ SYSCALL_DEFINE4(reboot, int, magic1, in case LINUX_REBOOT_CMD_HALT: kernel_halt(); do_exit(0); - panic("cannot halt");
case LINUX_REBOOT_CMD_POWER_OFF: kernel_power_off(); diff --combined kernel/signal.c index e99aff33ff14,505aa7fbfe43..7c4b7ae714d4 --- a/kernel/signal.c +++ b/kernel/signal.c @@@ -1323,6 -1323,7 +1323,7 @@@ force_sig_info_to_task(struct kernel_si blocked = sigismember(&t->blocked, sig); if (blocked || ignored || sigdfl) { action->sa.sa_handler = SIG_DFL; + action->sa.sa_flags |= SA_IMMUTABLE; if (blocked) { sigdelset(&t->blocked, sig); recalc_sigpending_and_wake(t); @@@ -1649,6 -1650,19 +1650,19 @@@ void force_sig(int sig } EXPORT_SYMBOL(force_sig);
+ void force_fatal_sig(int sig)
+ {
+         struct kernel_siginfo info;
+ 
+         clear_siginfo(&info);
+         info.si_signo = sig;
+         info.si_errno = 0;
+         info.si_code = SI_KERNEL;
+         info.si_pid = 0;
+         info.si_uid = 0;
+         force_sig_info_to_task(&info, current, true);
+ }
+ 
  /*
   * When things go south during signal handling, we
   * will force a SIGSEGV. And if the signal that caused
@@@ -1657,15 -1671,10 +1671,10 @@@
   */
  void force_sigsegv(int sig)
  {
-         struct task_struct *p = current;
- 
-         if (sig == SIGSEGV) {
-                 unsigned long flags;
-                 spin_lock_irqsave(&p->sighand->siglock, flags);
-                 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
-                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
-         }
-         force_sig(SIGSEGV);
+         if (sig == SIGSEGV)
+                 force_fatal_sig(SIGSEGV);
+         else
+                 force_sig(SIGSEGV);
  }
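force_fatal_sig() delivers a signal whose handler has been reset to SIG_DFL and marked SA_IMMUTABLE, so the task can no longer catch, block, or ignore it; force_sigsegv() now simply builds on it. A hedged sketch of a typical caller on an unrecoverable error path (the function name and header choice below are assumptions, not code from this merge):

#include <linux/sched/signal.h>
#include <linux/signal.h>

/*
 * Illustrative only: an error path that must terminate the current task.
 * Instead of do_exit(SIGSEGV) or open-coding the SIG_DFL reset, raise a
 * fatal, uncatchable signal and let the normal exit path run.
 */
static void example_unrecoverable_fault(void)
{
        force_fatal_sig(SIGSEGV);
}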
int force_sig_fault_to_task(int sig, int code, void __user *addr @@@ -2145,40 -2154,6 +2154,6 @@@ static void do_notify_parent_cldstop(st spin_unlock_irqrestore(&sighand->siglock, flags); }
- static inline bool may_ptrace_stop(void) - { - if (!likely(current->ptrace)) - return false; - /* - * Are we in the middle of do_coredump? - * If so and our tracer is also part of the coredump stopping - * is a deadlock situation, and pointless because our tracer - * is dead so don't allow us to stop. - * If SIGKILL was already sent before the caller unlocked - * ->siglock we must see ->core_state != NULL. Otherwise it - * is safe to enter schedule(). - * - * This is almost outdated, a task with the pending SIGKILL can't - * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported - * after SIGKILL was already dequeued. - */ - if (unlikely(current->mm->core_state) && - unlikely(current->mm == current->parent->mm)) - return false; - - return true; - } - - /* - * Return non-zero if there is a SIGKILL that should be waking us up. - * Called with the siglock held. - */ - static bool sigkill_pending(struct task_struct *tsk) - { - return sigismember(&tsk->pending.signal, SIGKILL) || - sigismember(&tsk->signal->shared_pending.signal, SIGKILL); - } - /* * This must be called with current->sighand->siglock held. * @@@ -2196,7 -2171,7 +2171,7 @@@ static void ptrace_stop(int exit_code, { bool gstop_done = false;
- if (arch_ptrace_stop_needed(exit_code, info)) { + if (arch_ptrace_stop_needed()) { /* * The arch code has something special to do before a * ptrace stop. This is allowed to block, e.g. for faults @@@ -2204,17 -2179,16 +2179,16 @@@ * calling arch_ptrace_stop, so we must release it now. * To preserve proper semantics, we must do this before * any signal bookkeeping like checking group_stop_count. - * Meanwhile, a SIGKILL could come in before we retake the - * siglock. That must prevent us from sleeping in TASK_TRACED. - * So after regaining the lock, we must check for SIGKILL. */ spin_unlock_irq(¤t->sighand->siglock); - arch_ptrace_stop(exit_code, info); + arch_ptrace_stop(); spin_lock_irq(¤t->sighand->siglock); - if (sigkill_pending(current)) - return; }
+ /* + * schedule() will not sleep if there is a pending signal that + * can awaken the task. + */ set_special_state(TASK_TRACED);
/* @@@ -2260,7 -2234,7 +2234,7 @@@
spin_unlock_irq(¤t->sighand->siglock); read_lock(&tasklist_lock); - if (may_ptrace_stop()) { + if (likely(current->ptrace)) { /* * Notify parents of the stop. * @@@ -2739,7 -2713,8 +2713,8 @@@ relock if (!signr) break; /* will return 0 */
- if (unlikely(current->ptrace) && signr != SIGKILL) { + if (unlikely(current->ptrace) && (signr != SIGKILL) && + !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) { signr = ptrace_signal(signr, &ksig->info); if (!signr) continue; @@@ -4089,6 -4064,10 +4064,10 @@@ int do_sigaction(int sig, struct k_siga k = &p->sighand->action[sig-1];
spin_lock_irq(&p->sighand->siglock); + if (k->sa.sa_flags & SA_IMMUTABLE) { + spin_unlock_irq(&p->sighand->siglock); + return -EINVAL; + } if (oact) *oact = *k;
@@@ -4138,29 -4117,11 +4117,29 @@@ return 0; }
+#ifdef CONFIG_DYNAMIC_SIGFRAME +static inline void sigaltstack_lock(void) + __acquires(¤t->sighand->siglock) +{ + spin_lock_irq(¤t->sighand->siglock); +} + +static inline void sigaltstack_unlock(void) + __releases(¤t->sighand->siglock) +{ + spin_unlock_irq(¤t->sighand->siglock); +} +#else +static inline void sigaltstack_lock(void) { } +static inline void sigaltstack_unlock(void) { } +#endif + static int do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp, size_t min_ss_size) { struct task_struct *t = current; + int ret = 0;
if (oss) { memset(oss, 0, sizeof(stack_t)); @@@ -4184,24 -4145,19 +4163,24 @@@ ss_mode != 0)) return -EINVAL;
+ sigaltstack_lock(); if (ss_mode == SS_DISABLE) { ss_size = 0; ss_sp = NULL; } else { if (unlikely(ss_size < min_ss_size)) - return -ENOMEM; + ret = -ENOMEM; + if (!sigaltstack_size_valid(ss_size)) + ret = -ENOMEM; } - - t->sas_ss_sp = (unsigned long) ss_sp; - t->sas_ss_size = ss_size; - t->sas_ss_flags = ss_flags; + if (!ret) { + t->sas_ss_sp = (unsigned long) ss_sp; + t->sas_ss_size = ss_size; + t->sas_ss_flags = ss_flags; + } + sigaltstack_unlock(); } - return 0; + return ret; }
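do_sigaltstack() above now performs the size validation and the assignment under sigaltstack_lock() and reports failure through ret instead of returning early, with undersized (or, with CONFIG_DYNAMIC_SIGFRAME, otherwise invalid) stacks rejected as -ENOMEM. The userspace interface is unchanged; for reference, standard sigaltstack() usage looks like this (ordinary POSIX code, not from this merge):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        stack_t ss = {
                .ss_sp = malloc(SIGSTKSZ),
                .ss_size = SIGSTKSZ,
                .ss_flags = 0,
        };

        if (!ss.ss_sp)
                return 1;

        /* A stack smaller than MINSIGSTKSZ is rejected with ENOMEM. */
        if (sigaltstack(&ss, NULL) == -1) {
                perror("sigaltstack");
                return 1;
        }

        printf("alternate signal stack installed (%zu bytes)\n", (size_t)ss.ss_size);
        return 0;
}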
SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) diff --combined mm/debug.c index 714be101dec9,aa5fe4d5c4b4..d0020fc58202 --- a/mm/debug.c +++ b/mm/debug.c @@@ -24,9 -24,7 +24,9 @@@ const char *migrate_reason_names[MR_TYP "syscall_or_cpuset", "mempolicy_mbind", "numa_misplaced", - "cma", + "contig_range", + "longterm_pin", + "demotion", };
const struct trace_print_flags pageflag_names[] = { @@@ -162,7 -160,7 +162,7 @@@ static void __dump_page(struct page *pa out_mapping: BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
- pr_warn("%sflags: %#lx(%pGp)%s\n", type, head->flags, &head->flags, + pr_warn("%sflags: %pGp%s\n", type, &head->flags, page_cma ? " CMA" : ""); print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32, sizeof(unsigned long), page, @@@ -216,7 -214,7 +216,7 @@@ void dump_mm(const struct mm_struct *mm "start_code %lx end_code %lx start_data %lx end_data %lx\n" "start_brk %lx brk %lx start_stack %lx\n" "arg_start %lx arg_end %lx env_start %lx env_end %lx\n" - "binfmt %px flags %lx core_state %px\n" + "binfmt %px flags %lx\n" #ifdef CONFIG_AIO "ioctx_table %px\n" #endif @@@ -248,7 -246,7 +248,7 @@@ mm->start_code, mm->end_code, mm->start_data, mm->end_data, mm->start_brk, mm->brk, mm->start_stack, mm->arg_start, mm->arg_end, mm->env_start, mm->env_end, - mm->binfmt, mm->flags, mm->core_state, + mm->binfmt, mm->flags, #ifdef CONFIG_AIO mm->ioctx_table, #endif diff --combined mm/oom_kill.c index 989f35a2bbb1,7877c755ab37..50b984d048ce --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@@ -787,9 -787,9 +787,9 @@@ static inline bool __task_will_free_mem struct signal_struct *sig = task->signal;
/* - * A coredumping process may sleep for an extended period in exit_mm(), - * so the oom killer cannot assume that the process will promptly exit - * and release memory. + * A coredumping process may sleep for an extended period in + * coredump_task_exit(), so the oom killer cannot assume that + * the process will promptly exit and release memory. */ if (sig->flags & SIGNAL_GROUP_COREDUMP) return false; @@@ -1150,7 -1150,7 +1150,7 @@@ SYSCALL_DEFINE2(process_mrelease, int, struct task_struct *task; struct task_struct *p; unsigned int f_flags; - bool reap = true; + bool reap = false; struct pid *pid; long ret = 0;
@@@ -1177,15 -1177,15 +1177,15 @@@ goto put_task; }
- mm = p->mm; - mmgrab(mm); - - /* If the work has been done already, just exit with success */ - if (test_bit(MMF_OOM_SKIP, &mm->flags)) - reap = false; - else if (!task_will_free_mem(p)) { - reap = false; - ret = -EINVAL; + if (mmget_not_zero(p->mm)) { + mm = p->mm; + if (task_will_free_mem(p)) + reap = true; + else { + /* Error only if the work has not been done already */ + if (!test_bit(MMF_OOM_SKIP, &mm->flags)) + ret = -EINVAL; + } } task_unlock(p);
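The process_mrelease() rework above replaces the unconditional mmgrab() with mmget_not_zero(), i.e. it now takes an mm_users reference only if the address space is still live, and the exit path later drops it with mmput() instead of mmdrop(). A sketch of that reference pattern; example_with_live_mm() and its locking shortcut are illustrative:

#include <linux/sched.h>
#include <linux/sched/mm.h>

/*
 * Illustrative pattern only: take an mm_users reference so the address
 * space cannot be torn down while it is being operated on. The real
 * code samples p->mm under task_lock(); this sketch glosses over that.
 */
static void example_with_live_mm(struct task_struct *p)
{
        struct mm_struct *mm = NULL;

        if (p->mm && mmget_not_zero(p->mm)) {
                mm = p->mm;
                /* ... work on the still-populated address space ... */
        }

        if (mm)
                mmput(mm);      /* drop the mm_users reference taken above */
}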
@@@ -1201,8 -1201,7 +1201,8 @@@ mmap_read_unlock(mm);
drop_mm: - mmdrop(mm); + if (mm) + mmput(mm); put_task: put_task_struct(task); put_pid: diff --combined net/batman-adv/tp_meter.c index fbcb15c7c29b,1252540cde17..93730d30af54 --- a/net/batman-adv/tp_meter.c +++ b/net/batman-adv/tp_meter.c @@@ -631,9 -631,9 +631,9 @@@ static void batadv_tp_recv_ack(struct b struct batadv_orig_node *orig_node = NULL; const struct batadv_icmp_tp_packet *icmp; struct batadv_tp_vars *tp_vars; + const unsigned char *dev_addr; size_t packet_len, mss; u32 rtt, recv_ack, cwnd; - unsigned char *dev_addr;
packet_len = BATADV_TP_PLEN; mss = BATADV_TP_PLEN; @@@ -890,7 -890,7 +890,7 @@@ out
batadv_tp_vars_put(tp_vars);
- do_exit(0); + return 0; }
/**