diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 32251aaad7bc..d6b3c49d42a8 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1623,6 +1623,16 @@ bytes respectively. Such letter suffixes can also be entirely omitted. kstack=N [X86] Print N words from the kernel stack in oops dumps. + kthread= [KNL, SMP] Only run kernel threads on the specified + list of processors. The kernel will start threads + on the indicated processors only (unless there + are specific reasons to run a thread with + different affinities). This can be used to make + init start on certain processors and also to + control where kmod and other user space threads + are being spawned. Allows keeping kernel threads + away from certain cores unless absolutely necessary. + kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs. Default is 0 (don't ignore, but inject #GP) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 0a9a6da21e74..065848bbf019 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -44,6 +44,7 @@ extern int nr_cpu_ids; * cpu_present_mask - has bit 'cpu' set iff cpu is populated * cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler * cpu_active_mask - has bit 'cpu' set iff cpu available to migration + * cpu_kthread_mask - has bit 'cpu' set iff general kernel threads allowed * * If !CONFIG_HOTPLUG_CPU, present == possible, and active == online. 
* @@ -80,6 +81,7 @@ extern const struct cpumask *const cpu_possible_mask; extern const struct cpumask *const cpu_online_mask; extern const struct cpumask *const cpu_present_mask; extern const struct cpumask *const cpu_active_mask; +extern const struct cpumask *const cpu_kthread_mask; #if NR_CPUS > 1 #define num_online_cpus() cpumask_weight(cpu_online_mask) diff --git a/kernel/cpu.c b/kernel/cpu.c index 75d047b0b19b..18fc3f01829b 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -754,6 +754,19 @@ static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly; const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits); EXPORT_SYMBOL(cpu_active_mask); +static DECLARE_BITMAP(cpu_kthread_bits, CONFIG_NR_CPUS) __read_mostly + = CPU_BITS_ALL; +const struct cpumask *const cpu_kthread_mask = to_cpumask(cpu_kthread_bits); +EXPORT_SYMBOL(cpu_kthread_mask); + +/* Parse "kthread=<cpulist>"; use the to_cpumask() accessor for consistency + * with the mask definitions above instead of casting the raw bitmap. */ +static int __init kthread_setup(char *str) +{ + cpulist_parse(str, to_cpumask(cpu_kthread_bits)); + return 1; +} +__setup("kthread=", kthread_setup); + + void set_cpu_possible(unsigned int cpu, bool possible) { if (possible) diff --git a/kernel/kmod.c b/kernel/kmod.c index 80f7a6d00519..9fc55ecb2c9a 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -231,8 +231,8 @@ static int ____call_usermodehelper(void *data) flush_signal_handlers(current, 1); spin_unlock_irq(&current->sighand->siglock); - /* We can run anywhere, unlike our parent keventd(). */ - set_cpus_allowed_ptr(current, cpu_all_mask); + /* We can run only where init is allowed to run. */ + set_cpus_allowed_ptr(current, cpu_kthread_mask); /* * Our parent is keventd, which runs with elevated scheduling priority. 
diff --git a/kernel/kthread.c b/kernel/kthread.c index 10e489c448fe..1b3e9651bb04 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -319,6 +319,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), */ sched_setscheduler_nocheck(task, SCHED_NORMAL, &param); set_cpus_allowed_ptr(task, cpu_all_mask); + set_cpus_allowed_ptr(task, cpu_kthread_mask); } kfree(create); return task; @@ -488,6 +489,7 @@ int kthreadd(void *unused) set_task_comm(tsk, "kthreadd"); ignore_signals(tsk); set_cpus_allowed_ptr(tsk, cpu_all_mask); + set_cpus_allowed_ptr(tsk, cpu_kthread_mask); set_mems_allowed(node_states[N_MEMORY]); current->flags |= PF_NOFREEZE; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ba8285b22d3f..dff0ccc57ec3 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -3815,6 +3815,7 @@ int apply_workqueue_attrs(struct workqueue_struct *wq, /* make a copy of @attrs and sanitize it */ copy_workqueue_attrs(new_attrs, attrs); cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask); + cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_kthread_mask); /* * We may create multiple pwqs with differing cpumasks. Make a @@ -4583,6 +4584,7 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) /* is @cpu the only online CPU? */ cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); + cpumask_and(&cpumask, &cpumask, cpu_kthread_mask); if (cpumask_weight(&cpumask) != 1) return;