Merge branch 'smp-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull SMP hotplug notifier removal from Thomas Gleixner:
 "This is the final cleanup of the hotplug notifier infrastructure. The
  series has been reintegrated in the last two days because a new
  driver using the old infrastructure arrived via the SCSI tree.

  Summary:

   - convert the last leftover drivers utilizing notifiers

   - fixup for a completely broken hotplug user

   - prevent setup of already used states

   - removal of the notifiers

   - treewide cleanup of hotplug state names

   - consolidation of state space

  There is sphinx-based documentation pending, but it still needs
  review from the documentation folks"

* 'smp-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  irqchip/armada-xp: Consolidate hotplug state space
  irqchip/gic: Consolidate hotplug state space
  coresight/etm3/4x: Consolidate hotplug state space
  cpu/hotplug: Cleanup state names
  cpu/hotplug: Remove obsolete cpu hotplug register/unregister functions
  staging/lustre/libcfs: Convert to hotplug state machine
  scsi/bnx2i: Convert to hotplug state machine
  scsi/bnx2fc: Convert to hotplug state machine
  cpu/hotplug: Prevent overwriting of callbacks
  x86/msr: Remove bogus cleanup from the error path
  bus: arm-ccn: Prevent hotplug callback leak
  perf/x86/intel/cstate: Prevent hotplug callback leak
  ARM/imx/mmcd: Fix broken cpu hotplug handling
  scsi: qedi: Convert to hotplug state machine
Linus Torvalds 2016-12-25 14:05:56 -08:00
commit b272f732f8
72 changed files with 311 additions and 692 deletions
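
The shape of the conversion is the same across the driver patches below: a
multiplexed notifier_block that switches on CPU_* action codes becomes a pair
of per-transition callbacks registered against a hotplug state. A minimal
before/after sketch, with a hypothetical driver "foo" whose helpers
foo_init_cpu()/foo_cleanup_cpu() stand in for the driver-specific work:

/* Old style (removed by this series): one notifier, demultiplexed by hand. */
static int foo_cpu_callback(struct notifier_block *nfb,
                            unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                foo_init_cpu(cpu);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                foo_cleanup_cpu(cpu);
                break;
        }
        return NOTIFY_OK;
}

/* New style: symmetric callbacks, no action decoding, no notifier_block. */
static int foo_online(unsigned int cpu)
{
        foo_init_cpu(cpu);
        return 0;
}

static int foo_offline(unsigned int cpu)
{
        foo_cleanup_cpu(cpu);
        return 0;
}

static enum cpuhp_state foo_hp_state;

static int __init foo_init(void)
{
        int ret;

        /* Dynamic states return the allocated slot; keep it for removal. */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/foo:online",
                                foo_online, foo_offline);
        if (ret < 0)
                return ret;
        foo_hp_state = ret;
        return 0;
}

static void __exit foo_exit(void)
{
        cpuhp_remove_state(foo_hp_state);
}

For a dynamic state, the startup callback is also invoked for every CPU that
is already online at setup time, unless the _nocalls() variant is used.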

View File

@ -339,7 +339,7 @@ static int __init twd_local_timer_common_register(struct device_node *np)
}
cpuhp_setup_state_nocalls(CPUHP_AP_ARM_TWD_STARTING,
"AP_ARM_TWD_STARTING",
"arm/timer/twd:starting",
twd_timer_starting_cpu, twd_timer_dying_cpu);
twd_get_clock(np);

View File

@ -60,6 +60,7 @@
#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)
static enum cpuhp_state cpuhp_mmdc_state;
static int ddr_type;
struct fsl_mmdc_devtype_data {
@ -451,8 +452,8 @@ static int imx_mmdc_remove(struct platform_device *pdev)
{
struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev);
cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
perf_pmu_unregister(&pmu_mmdc->pmu);
cpuhp_remove_state_nocalls(CPUHP_ONLINE);
kfree(pmu_mmdc);
return 0;
}
@ -472,6 +473,18 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
return -ENOMEM;
}
/* The first instance registers the hotplug state */
if (!cpuhp_mmdc_state) {
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
"perf/arm/mmdc:online", NULL,
mmdc_pmu_offline_cpu);
if (ret < 0) {
pr_err("cpuhp_setup_state_multi failed\n");
goto pmu_free;
}
cpuhp_mmdc_state = ret;
}
mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
if (mmdc_num == 0)
name = "mmdc";
@ -485,26 +498,23 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
HRTIMER_MODE_REL);
pmu_mmdc->hrtimer.function = mmdc_pmu_timer_handler;
cpuhp_state_add_instance_nocalls(CPUHP_ONLINE,
&pmu_mmdc->node);
cpumask_set_cpu(smp_processor_id(), &pmu_mmdc->cpu);
ret = cpuhp_setup_state_multi(CPUHP_AP_NOTIFY_ONLINE,
"MMDC_ONLINE", NULL,
mmdc_pmu_offline_cpu);
if (ret) {
pr_err("cpuhp_setup_state_multi failure\n");
goto pmu_register_err;
}
cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc->cpu);
/* Register the pmu instance for cpu hotplug */
cpuhp_state_add_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
ret = perf_pmu_register(&(pmu_mmdc->pmu), name, -1);
platform_set_drvdata(pdev, pmu_mmdc);
if (ret)
goto pmu_register_err;
platform_set_drvdata(pdev, pmu_mmdc);
return 0;
pmu_register_err:
pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
hrtimer_cancel(&pmu_mmdc->hrtimer);
pmu_free:
kfree(pmu_mmdc);
return ret;
}
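
The mmcd fix above doubles as a template for the multi-instance API: the
broken code registered a fresh state per probed device and passed bogus state
constants (CPUHP_ONLINE, CPUHP_AP_NOTIFY_ONLINE) to the instance calls. The
fixed pattern sets the state up exactly once and hooks each device instance
into it. A condensed sketch of that pattern, with hypothetical "foo" names:

struct foo_pmu {
        struct hlist_node node;         /* links the instance into the state */
        /* per-device data */
};

static enum cpuhp_state foo_hp_state;

static int foo_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        /* migrate this instance's PMU context away from @cpu */
        return 0;
}

static int foo_probe_one(struct foo_pmu *pmu)
{
        int ret;

        /* The first instance registers the state ... */
        if (!foo_hp_state) {
                ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                              "perf/foo:online", NULL,
                                              foo_offline_cpu);
                if (ret < 0)
                        return ret;
                foo_hp_state = ret;
        }
        /* ... every instance merely adds itself to it. */
        return cpuhp_state_add_instance_nocalls(foo_hp_state, &pmu->node);
}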

View File

@ -148,7 +148,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
of_node_put(cpu_config_np);
cpuhp_setup_state_nocalls(CPUHP_AP_ARM_MVEBU_COHERENCY,
"AP_ARM_MVEBU_COHERENCY",
"arm/mvebu/coherency:starting",
armada_xp_clear_l2_starting, NULL);
exit:
set_cpu_coherent();

View File

@ -563,7 +563,7 @@ static __init int l2x0_pmu_init(void)
cpumask_set_cpu(0, &pmu_cpu);
ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_L2X0_ONLINE,
"AP_PERF_ARM_L2X0_ONLINE", NULL,
"perf/arm/l2x0:online", NULL,
l2x0_pmu_offline_cpu);
if (ret)
goto out_pmu;

View File

@ -683,7 +683,7 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
if (aux & L310_AUX_CTRL_FULL_LINE_ZERO)
cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING,
"AP_ARM_L2X0_STARTING", l2c310_starting_cpu,
"arm/l2x0:starting", l2c310_starting_cpu,
l2c310_dying_cpu);
}

View File

@ -799,7 +799,7 @@ static int __init vfp_init(void)
}
cpuhp_setup_state_nocalls(CPUHP_AP_ARM_VFP_STARTING,
"AP_ARM_VFP_STARTING", vfp_starting_cpu,
"arm/vfp:starting", vfp_starting_cpu,
vfp_dying_cpu);
vfp_vector = vfp_support_entry;

View File

@ -412,7 +412,7 @@ static int __init xen_guest_init(void)
pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
return cpuhp_setup_state(CPUHP_AP_ARM_XEN_STARTING,
"AP_ARM_XEN_STARTING", xen_starting_cpu,
"arm/xen:starting", xen_starting_cpu,
xen_dying_cpu);
}
early_initcall(xen_guest_init);

View File

@ -640,7 +640,7 @@ static int __init armv8_deprecated_init(void)
}
cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
"AP_ARM64_ISNDEP_STARTING",
"arm64/isndep:starting",
run_all_insn_set_hw_mode, NULL);
register_insn_emulation_sysctl(ctl_abi);

View File

@ -140,7 +140,7 @@ static int clear_os_lock(unsigned int cpu)
static int debug_monitors_init(void)
{
return cpuhp_setup_state(CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
"CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING",
"arm64/debug_monitors:starting",
clear_os_lock, NULL);
}
postcore_initcall(debug_monitors_init);

View File

@ -1001,7 +1001,7 @@ static int __init arch_hw_breakpoint_init(void)
* debugger will leave the world in a nice state for us.
*/
ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
"CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING",
"perf/arm64/hw_breakpoint:starting",
hw_breakpoint_reset, NULL);
if (ret)
pr_err("failed to register CPU hotplug notifier: %d\n", ret);

View File

@ -475,7 +475,7 @@ static int __init bfin_pmu_init(void)
ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
if (!ret)
cpuhp_setup_state(CPUHP_PERF_BFIN, "PERF_BFIN",
cpuhp_setup_state(CPUHP_PERF_BFIN, "perf/bfin:starting",
bfin_pmu_prepare_cpu, NULL);
return ret;
}

View File

@ -868,7 +868,7 @@ static int __init init_hw_perf_events(void)
metag_out32(0, PERF_COUNT(1));
cpuhp_setup_state(CPUHP_AP_PERF_METAG_STARTING,
"AP_PERF_METAG_STARTING", metag_pmu_starting_cpu,
"perf/metag:starting", metag_pmu_starting_cpu,
NULL);
ret = perf_pmu_register(&pmu, metag_pmu->name, PERF_TYPE_RAW);

View File

@ -713,7 +713,7 @@ static int __init cps_pm_init(void)
pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
}
return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PM_CPS_CPU_ONLINE",
return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mips/cps_pm:online",
cps_pm_online_cpu, NULL);
}
arch_initcall(cps_pm_init);

View File

@ -186,7 +186,7 @@ static int __init loongson3_init(void)
{
on_each_cpu(reset_counters, NULL, 1);
cpuhp_setup_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
"AP_MIPS_OP_LOONGSON3_STARTING",
"mips/oprofile/loongson3:starting",
loongson3_starting_cpu, loongson3_dying_cpu);
save_perf_irq = perf_irq;
perf_irq = loongson3_perfcount_handler;

View File

@ -944,7 +944,7 @@ void __init initmem_init(void)
* _nocalls() + manual invocation is used because cpuhp is not yet
* initialized for the boot CPU.
*/
cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "POWER_NUMA_PREPARE",
cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
for_each_present_cpu(cpu)
numa_setup_cpu(cpu);

View File

@ -2189,7 +2189,7 @@ int register_power_pmu(struct power_pmu *pmu)
#endif /* CONFIG_PPC64 */
perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
cpuhp_setup_state(CPUHP_PERF_POWER, "PERF_POWER",
cpuhp_setup_state(CPUHP_PERF_POWER, "perf/powerpc:prepare",
power_pmu_prepare_cpu, NULL);
return 0;
}

View File

@ -711,7 +711,7 @@ static int __init cpumf_pmu_init(void)
return rc;
}
return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
"AP_PERF_S390_CF_ONLINE",
"perf/s390/cf:online",
s390_pmu_online_cpu, s390_pmu_offline_cpu);
}
early_initcall(cpumf_pmu_init);

View File

@ -1623,7 +1623,7 @@ static int __init init_cpum_sampling_pmu(void)
goto out;
}
cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "AP_PERF_S390_SF_ONLINE",
cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "perf/s390/sf:online",
s390_pmu_sf_online_cpu, s390_pmu_sf_offline_cpu);
out:
return err;

View File

@ -371,7 +371,7 @@ static int __init init_vdso(void)
/* notifier priority > KVM */
return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
"AP_X86_VDSO_VMA_ONLINE", vgetcpu_online, NULL);
"x86/vdso/vma:online", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */

View File

@ -1010,7 +1010,7 @@ static __init int amd_ibs_init(void)
* all online cpus.
*/
cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
"AP_PERF_X86_AMD_IBS_STARTING",
"perf/x86/amd/ibs:starting",
x86_pmu_amd_ibs_starting_cpu,
x86_pmu_amd_ibs_dying_cpu);

View File

@ -291,7 +291,7 @@ static int __init amd_power_pmu_init(void)
cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
"AP_PERF_X86_AMD_POWER_ONLINE",
"perf/x86/amd/power:online",
power_cpu_init, power_cpu_exit);
ret = perf_pmu_register(&pmu_class, "power", -1);

View File

@ -527,16 +527,16 @@ static int __init amd_uncore_init(void)
* Install callbacks. Core will call them for each online cpu.
*/
if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
"PERF_X86_AMD_UNCORE_PREP",
"perf/x86/amd/uncore:prepare",
amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
goto fail_l2;
if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
"AP_PERF_X86_AMD_UNCORE_STARTING",
"perf/x86/amd/uncore:starting",
amd_uncore_cpu_starting, NULL))
goto fail_prep;
if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
"AP_PERF_X86_AMD_UNCORE_ONLINE",
"perf/x86/amd/uncore:online",
amd_uncore_cpu_online,
amd_uncore_cpu_down_prepare))
goto fail_start;

View File

@ -1820,18 +1820,18 @@ static int __init init_hw_perf_events(void)
* Install callbacks. Core will call them for each online
* cpu.
*/
err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "PERF_X86_PREPARE",
err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "perf/x86:prepare",
x86_pmu_prepare_cpu, x86_pmu_dead_cpu);
if (err)
return err;
err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING,
"AP_PERF_X86_STARTING", x86_pmu_starting_cpu,
"perf/x86:starting", x86_pmu_starting_cpu,
x86_pmu_dying_cpu);
if (err)
goto out;
err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "AP_PERF_X86_ONLINE",
err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "perf/x86:online",
x86_pmu_online_cpu, NULL);
if (err)
goto out1;

View File

@ -1747,9 +1747,9 @@ static int __init intel_cqm_init(void)
* is enabled to avoid notifier leak.
*/
cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_STARTING,
"AP_PERF_X86_CQM_STARTING",
"perf/x86/cqm:starting",
intel_cqm_cpu_starting, NULL);
cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_ONLINE, "AP_PERF_X86_CQM_ONLINE",
cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_ONLINE, "perf/x86/cqm:online",
NULL, intel_cqm_cpu_exit);
out:

View File

@ -594,6 +594,9 @@ static int __init cstate_probe(const struct cstate_model *cm)
static inline void cstate_cleanup(void)
{
cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
if (has_cstate_core)
perf_pmu_unregister(&cstate_core_pmu);
@ -606,16 +609,16 @@ static int __init cstate_init(void)
int err;
cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
"AP_PERF_X86_CSTATE_STARTING", cstate_cpu_init,
NULL);
"perf/x86/cstate:starting", cstate_cpu_init, NULL);
cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
"AP_PERF_X86_CSTATE_ONLINE", NULL, cstate_cpu_exit);
"perf/x86/cstate:online", NULL, cstate_cpu_exit);
if (has_cstate_core) {
err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
if (err) {
has_cstate_core = false;
pr_info("Failed to register cstate core pmu\n");
cstate_cleanup();
return err;
}
}
@ -629,8 +632,7 @@ static int __init cstate_init(void)
return err;
}
}
return err;
return 0;
}
static int __init cstate_pmu_init(void)
@ -655,8 +657,6 @@ module_init(cstate_pmu_init);
static void __exit cstate_pmu_exit(void)
{
cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
cstate_cleanup();
}
module_exit(cstate_pmu_exit);
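
The cstate and arm-ccn patches plug the same kind of leak: hotplug states
installed at init were never removed on the error and exit paths, leaving
stale callbacks behind after a failed probe or a module unload. The rule both
fixes apply, sketched with hypothetical names: every cpuhp_setup_state() needs
a matching cpuhp_remove_state_nocalls() reachable from every failure and exit
path.

static enum cpuhp_state foo_online_state;

static void foo_cleanup(void)
{
        /* Undo whatever init set up; shared by error and unload paths. */
        cpuhp_remove_state_nocalls(foo_online_state);
}

static int __init foo_init(void)
{
        int ret;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
                                foo_online_cpu, foo_offline_cpu);
        if (ret < 0)
                return ret;
        foo_online_state = ret;

        ret = perf_pmu_register(&foo_pmu, "foo", -1);
        if (ret)
                foo_cleanup();          /* no leaked callbacks on failure */
        return ret;
}

static void __exit foo_exit(void)
{
        perf_pmu_unregister(&foo_pmu);
        foo_cleanup();
}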

View File

@ -803,13 +803,13 @@ static int __init rapl_pmu_init(void)
* Install callbacks. Core will call them for each online cpu.
*/
ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "PERF_X86_RAPL_PREP",
ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
rapl_cpu_prepare, NULL);
if (ret)
goto out;
ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
"AP_PERF_X86_RAPL_ONLINE",
"perf/x86/rapl:online",
rapl_cpu_online, rapl_cpu_offline);
if (ret)
goto out1;

View File

@ -1398,22 +1398,22 @@ static int __init intel_uncore_init(void)
*/
if (!cret) {
ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
"PERF_X86_UNCORE_PREP",
uncore_cpu_prepare, NULL);
"perf/x86/intel/uncore:prepare",
uncore_cpu_prepare, NULL);
if (ret)
goto err;
} else {
cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
"PERF_X86_UNCORE_PREP",
"perf/x86/intel/uncore:prepare",
uncore_cpu_prepare, NULL);
}
first_init = 1;
cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
"AP_PERF_X86_UNCORE_STARTING",
"perf/x86/uncore:starting",
uncore_cpu_starting, uncore_cpu_dying);
first_init = 0;
cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
"AP_PERF_X86_UNCORE_ONLINE",
"perf/x86/uncore:online",
uncore_event_cpu_online, uncore_event_cpu_offline);
return 0;

View File

@ -234,7 +234,7 @@ static __init int apbt_late_init(void)
if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ||
!apb_timer_block_enabled)
return 0;
return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "X86_APB_DEAD", NULL,
return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "x86/apb:dead", NULL,
apbt_cpu_dead);
}
fs_initcall(apbt_late_init);

View File

@ -191,7 +191,7 @@ static int x2apic_cluster_probe(void)
if (!x2apic_mode)
return 0;
ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE",
ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
x2apic_prepare_cpu, x2apic_dead_cpu);
if (ret < 0) {
pr_err("Failed to register X2APIC_PREPARE\n");

View File

@ -1051,11 +1051,11 @@ static __init int hpet_late_init(void)
return 0;
/* This notifier should be called after workqueue is ready */
ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "AP_X86_HPET_ONLINE",
ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "x86/hpet:online",
hpet_cpuhp_online, NULL);
if (ret)
return ret;
ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "X86_HPET_DEAD", NULL,
ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "x86/hpet:dead", NULL,
hpet_cpuhp_dead);
if (ret)
goto err_cpuhp;

View File

@ -224,7 +224,6 @@ static int __init msr_init(void)
return 0;
out_class:
cpuhp_remove_state(cpuhp_msr_state);
class_destroy(msr_class);
out_chrdev:
__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");

View File

@ -408,7 +408,7 @@ static __init int tboot_late_init(void)
tboot_create_trampoline();
atomic_set(&ap_wfs_count, 0);
cpuhp_setup_state(CPUHP_AP_X86_TBOOT_DYING, "AP_X86_TBOOT_DYING", NULL,
cpuhp_setup_state(CPUHP_AP_X86_TBOOT_DYING, "x86/tboot:dying", NULL,
tboot_dying_cpu);
#ifdef CONFIG_DEBUG_FS
debugfs_create_file("tboot_log", S_IRUSR,

View File

@ -5855,7 +5855,7 @@ static void kvm_timer_init(void)
}
pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "AP_X86_KVM_CLK_ONLINE",
cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
kvmclock_cpu_online, kvmclock_cpu_down_prep);
}

View File

@ -1529,11 +1529,11 @@ static int xen_cpuhp_setup(void)
int rc;
rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
"XEN_HVM_GUEST_PREPARE",
"x86/xen/hvm_guest:prepare",
xen_cpu_up_prepare, xen_cpu_dead);
if (rc >= 0) {
rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"XEN_HVM_GUEST_ONLINE",
"x86/xen/hvm_guest:online",
xen_cpu_up_online, NULL);
if (rc < 0)
cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);

View File

@ -422,7 +422,7 @@ static int __init xtensa_pmu_init(void)
int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT);
ret = cpuhp_setup_state(CPUHP_AP_PERF_XTENSA_STARTING,
"AP_PERF_XTENSA_STARTING", xtensa_pmu_setup,
"perf/xtensa:starting", xtensa_pmu_setup,
NULL);
if (ret) {
pr_err("xtensa_pmu: failed to register CPU-hotplug.\n");

View File

@ -1796,7 +1796,7 @@ static int __init cci_platform_init(void)
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCI_ONLINE,
"AP_PERF_ARM_CCI_ONLINE", NULL,
"perf/arm/cci:online", NULL,
cci_pmu_offline_cpu);
if (ret)
return ret;

View File

@ -1562,7 +1562,7 @@ static int __init arm_ccn_init(void)
int i, ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCN_ONLINE,
"AP_PERF_ARM_CCN_ONLINE", NULL,
"perf/arm/ccn:online", NULL,
arm_ccn_pmu_offline_cpu);
if (ret)
return ret;
@ -1570,7 +1570,10 @@ static int __init arm_ccn_init(void)
for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;
return platform_driver_register(&arm_ccn_driver);
ret = platform_driver_register(&arm_ccn_driver);
if (ret)
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
return ret;
}
static void __exit arm_ccn_exit(void)

View File

@ -309,7 +309,7 @@ static int __init arc_clockevent_setup(struct device_node *node)
}
ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
"AP_ARC_TIMER_STARTING",
"clockevents/arc/timer:starting",
arc_timer_starting_cpu,
arc_timer_dying_cpu);
if (ret) {

View File

@ -738,7 +738,7 @@ static int __init arch_timer_register(void)
/* Register and immediately configure the timer on the boot CPU */
err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
"AP_ARM_ARCH_TIMER_STARTING",
"clockevents/arm/arch_timer:starting",
arch_timer_starting_cpu, arch_timer_dying_cpu);
if (err)
goto out_unreg_cpupm;

View File

@ -316,7 +316,7 @@ static int __init global_timer_of_register(struct device_node *np)
goto out_irq;
err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
"AP_ARM_GLOBAL_TIMER_STARTING",
"clockevents/arm/global_timer:starting",
gt_starting_cpu, gt_dying_cpu);
if (err)
goto out_irq;

View File

@ -34,7 +34,7 @@ static int dummy_timer_starting_cpu(unsigned int cpu)
static int __init dummy_timer_register(void)
{
return cpuhp_setup_state(CPUHP_AP_DUMMY_TIMER_STARTING,
"AP_DUMMY_TIMER_STARTING",
"clockevents/dummy_timer:starting",
dummy_timer_starting_cpu, NULL);
}
early_initcall(dummy_timer_register);

View File

@ -552,7 +552,7 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem *
/* Install hotplug callbacks which configure the timer on this CPU */
err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
"AP_EXYNOS4_MCT_TIMER_STARTING",
"clockevents/exynos4/mct_timer:starting",
exynos4_mct_starting_cpu,
exynos4_mct_dying_cpu);
if (err)

View File

@ -240,7 +240,7 @@ static int __init jcore_pit_init(struct device_node *node)
}
cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
"AP_JCORE_TIMER_STARTING",
"clockevents/jcore:starting",
jcore_pit_local_init, NULL);
return 0;

View File

@ -154,6 +154,6 @@ int __init metag_generic_timer_init(void)
/* Hook cpu boot to configure the CPU's timers */
return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING,
"AP_METAG_TIMER_STARTING",
"clockevents/metag:starting",
arch_timer_starting_cpu, NULL);
}

View File

@ -120,8 +120,8 @@ static int gic_clockevent_init(void)
}
cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING,
"AP_MIPS_GIC_TIMER_STARTING", gic_starting_cpu,
gic_dying_cpu);
"clockevents/mips/gic/timer:starting",
gic_starting_cpu, gic_dying_cpu);
return 0;
}

View File

@ -182,7 +182,7 @@ static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
} else {
/* Install and invoke hotplug callbacks */
res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING,
"AP_QCOM_TIMER_STARTING",
"clockevents/qcom/timer:starting",
msm_local_timer_starting_cpu,
msm_local_timer_dying_cpu);
if (res) {

View File

@ -320,7 +320,7 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np)
}
res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING,
"AP_ARMADA_TIMER_STARTING",
"clockevents/armada:starting",
armada_370_xp_timer_starting_cpu,
armada_370_xp_timer_dying_cpu);
if (res) {

View File

@ -221,7 +221,7 @@ static int __init sirfsoc_clockevent_init(void)
/* Install and invoke hotplug callbacks */
return cpuhp_setup_state(CPUHP_AP_MARCO_TIMER_STARTING,
"AP_MARCO_TIMER_STARTING",
"clockevents/marco:starting",
sirfsoc_local_timer_starting_cpu,
sirfsoc_local_timer_dying_cpu);
}

View File

@ -804,10 +804,10 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
if (!etm_count++) {
cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
"AP_ARM_CORESIGHT_STARTING",
"arm/coresight:starting",
etm_starting_cpu, etm_dying_cpu);
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"AP_ARM_CORESIGHT_ONLINE",
"arm/coresight:online",
etm_online_cpu, NULL);
if (ret < 0)
goto err_arch_supported;

View File

@ -986,11 +986,11 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
dev_err(dev, "ETM arch init failed\n");
if (!etm4_count++) {
cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING,
"AP_ARM_CORESIGHT4_STARTING",
cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
"arm/coresight4:starting",
etm4_starting_cpu, etm4_dying_cpu);
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"AP_ARM_CORESIGHT4_ONLINE",
"arm/coresight4:online",
etm4_online_cpu, NULL);
if (ret < 0)
goto err_arch_supported;
@ -1037,7 +1037,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
err_arch_supported:
if (--etm4_count == 0) {
cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING);
cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
if (hp_online)
cpuhp_remove_state_nocalls(hp_online);
}

View File

@ -578,13 +578,13 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
#ifdef CONFIG_SMP
set_smp_cross_call(armada_mpic_send_doorbell);
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
"AP_IRQ_ARMADA_XP_STARTING",
"irqchip/armada/ipi:starting",
armada_xp_mpic_starting_cpu, NULL);
#endif
} else {
#ifdef CONFIG_SMP
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
"AP_IRQ_ARMADA_CASC_STARTING",
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
"irqchip/armada/cascade:starting",
mpic_cascaded_starting_cpu, NULL);
#endif
irq_set_chained_handler(parent_irq,

View File

@ -245,7 +245,7 @@ bcm2836_arm_irqchip_smp_init(void)
#ifdef CONFIG_SMP
/* Unmask IPIs to the boot CPU. */
cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING,
"AP_IRQ_BCM2836_STARTING", bcm2836_cpu_starting,
"irqchip/bcm2836:starting", bcm2836_cpu_starting,
bcm2836_cpu_dying);
set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);

View File

@ -632,9 +632,9 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
static void gic_smp_init(void)
{
set_smp_cross_call(gic_raise_softirq);
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GICV3_STARTING,
"AP_IRQ_GICV3_STARTING", gic_starting_cpu,
NULL);
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
"irqchip/arm/gicv3:starting",
gic_starting_cpu, NULL);
}
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,

View File

@ -1191,7 +1191,7 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
set_smp_cross_call(gic_raise_softirq);
#endif
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
"AP_IRQ_GIC_STARTING",
"irqchip/arm/gic:starting",
gic_starting_cpu, NULL);
set_handle_irq(gic_handle_irq);
if (static_key_true(&supports_deactivate))

View File

@ -407,7 +407,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
set_handle_irq(hip04_handle_irq);
hip04_irq_dist_init(&hip04_data);
cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "AP_IRQ_HIP04_STARTING",
cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "irqchip/hip04:starting",
hip04_irq_starting_cpu, NULL);
return 0;
}

View File

@ -127,7 +127,7 @@ static int __init ledtrig_cpu_init(void)
register_syscore_ops(&ledtrig_cpu_syscore_ops);
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_LEDTRIG_STARTING",
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "leds/trigger:starting",
ledtrig_online_cpu, ledtrig_prepare_down_cpu);
if (ret < 0)
pr_err("CPU hotplug notifier for ledtrig-cpu could not be registered: %d\n",

View File

@ -2484,13 +2484,13 @@ static __init int virtio_net_driver_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "AP_VIRT_NET_ONLINE",
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
virtnet_cpu_online,
virtnet_cpu_down_prep);
if (ret < 0)
goto out;
virtionet_online = ret;
ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "VIRT_NET_DEAD",
ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
NULL, virtnet_cpu_dead);
if (ret)
goto err_dead;

View File

@ -1084,7 +1084,7 @@ static int arm_pmu_hp_init(void)
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
"AP_PERF_ARM_STARTING",
"perf/arm/pmu:starting",
arm_perf_starting_cpu, NULL);
if (ret)
pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",

View File

@ -127,13 +127,6 @@ module_param_named(log_fka, bnx2fc_log_fka, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(log_fka, " Print message to kernel log when fcoe is "
"initiating a FIP keep alive when debug logging is enabled.");
static int bnx2fc_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu);
/* notification function for CPU hotplug events */
static struct notifier_block bnx2fc_cpu_notifier = {
.notifier_call = bnx2fc_cpu_callback,
};
static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport)
{
return ((struct bnx2fc_interface *)
@ -2622,37 +2615,19 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
kthread_stop(thread);
}
/**
* bnx2fc_cpu_callback - Handler for CPU hotplug events
*
* @nfb: The callback data block
* @action: The event triggering the callback
* @hcpu: The index of the CPU that the event is for
*
* This creates or destroys per-CPU data for fcoe
*
* Returns NOTIFY_OK always.
*/
static int bnx2fc_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned cpu = (unsigned long)hcpu;
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
printk(PFX "CPU %x online: Create Rx thread\n", cpu);
bnx2fc_percpu_thread_create(cpu);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
bnx2fc_percpu_thread_destroy(cpu);
break;
default:
break;
}
return NOTIFY_OK;
static int bnx2fc_cpu_online(unsigned int cpu)
{
printk(PFX "CPU %x online: Create Rx thread\n", cpu);
bnx2fc_percpu_thread_create(cpu);
return 0;
}
static int bnx2fc_cpu_dead(unsigned int cpu)
{
printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
bnx2fc_percpu_thread_destroy(cpu);
return 0;
}
static int bnx2fc_slave_configure(struct scsi_device *sdev)
@ -2664,6 +2639,8 @@ static int bnx2fc_slave_configure(struct scsi_device *sdev)
return 0;
}
static enum cpuhp_state bnx2fc_online_state;
/**
* bnx2fc_mod_init - module init entry point
*
@ -2724,21 +2701,31 @@ static int __init bnx2fc_mod_init(void)
spin_lock_init(&p->fp_work_lock);
}
cpu_notifier_register_begin();
get_online_cpus();
for_each_online_cpu(cpu) {
for_each_online_cpu(cpu)
bnx2fc_percpu_thread_create(cpu);
}
/* Initialize per CPU interrupt thread */
__register_hotcpu_notifier(&bnx2fc_cpu_notifier);
rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"scsi/bnx2fc:online",
bnx2fc_cpu_online, NULL);
if (rc < 0)
goto stop_threads;
bnx2fc_online_state = rc;
cpu_notifier_register_done();
cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead",
NULL, bnx2fc_cpu_dead);
put_online_cpus();
cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
return 0;
stop_threads:
for_each_online_cpu(cpu)
bnx2fc_percpu_thread_destroy(cpu);
put_online_cpus();
kthread_stop(l2_thread);
free_wq:
destroy_workqueue(bnx2fc_wq);
release_bt:
@ -2797,16 +2784,16 @@ static void __exit bnx2fc_mod_exit(void)
if (l2_thread)
kthread_stop(l2_thread);
cpu_notifier_register_begin();
get_online_cpus();
/* Destroy per cpu threads */
for_each_online_cpu(cpu) {
bnx2fc_percpu_thread_destroy(cpu);
}
__unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
cpuhp_remove_state_nocalls(bnx2fc_online_state);
cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD);
cpu_notifier_register_done();
put_online_cpus();
destroy_workqueue(bnx2fc_wq);
/*

View File

@ -70,14 +70,6 @@ u64 iscsi_error_mask = 0x00;
DEFINE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
static int bnx2i_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu);
/* notification function for CPU hotplug events */
static struct notifier_block bnx2i_cpu_notifier = {
.notifier_call = bnx2i_cpu_callback,
};
/**
* bnx2i_identify_device - identifies NetXtreme II device type
* @hba: Adapter structure pointer
@ -461,41 +453,21 @@ static void bnx2i_percpu_thread_destroy(unsigned int cpu)
kthread_stop(thread);
}
/**
* bnx2i_cpu_callback - Handler for CPU hotplug events
*
* @nfb: The callback data block
* @action: The event triggering the callback
* @hcpu: The index of the CPU that the event is for
*
* This creates or destroys per-CPU data for iSCSI
*
* Returns NOTIFY_OK always.
*/
static int bnx2i_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
static int bnx2i_cpu_online(unsigned int cpu)
{
unsigned cpu = (unsigned long)hcpu;
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
printk(KERN_INFO "bnx2i: CPU %x online: Create Rx thread\n",
cpu);
bnx2i_percpu_thread_create(cpu);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
printk(KERN_INFO "CPU %x offline: Remove Rx thread\n", cpu);
bnx2i_percpu_thread_destroy(cpu);
break;
default:
break;
}
return NOTIFY_OK;
pr_info("bnx2i: CPU %x online: Create Rx thread\n", cpu);
bnx2i_percpu_thread_create(cpu);
return 0;
}
static int bnx2i_cpu_dead(unsigned int cpu)
{
pr_info("CPU %x offline: Remove Rx thread\n", cpu);
bnx2i_percpu_thread_destroy(cpu);
return 0;
}
static enum cpuhp_state bnx2i_online_state;
/**
* bnx2i_mod_init - module init entry point
@ -539,18 +511,28 @@ static int __init bnx2i_mod_init(void)
p->iothread = NULL;
}
cpu_notifier_register_begin();
get_online_cpus();
for_each_online_cpu(cpu)
bnx2i_percpu_thread_create(cpu);
/* Initialize per CPU interrupt thread */
__register_hotcpu_notifier(&bnx2i_cpu_notifier);
cpu_notifier_register_done();
err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"scsi/bnx2i:online",
bnx2i_cpu_online, NULL);
if (err < 0)
goto remove_threads;
bnx2i_online_state = err;
cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2I_DEAD, "scsi/bnx2i:dead",
NULL, bnx2i_cpu_dead);
put_online_cpus();
return 0;
remove_threads:
for_each_online_cpu(cpu)
bnx2i_percpu_thread_destroy(cpu);
put_online_cpus();
cnic_unregister_driver(CNIC_ULP_ISCSI);
unreg_xport:
iscsi_unregister_transport(&bnx2i_iscsi_transport);
out:
@ -587,14 +569,14 @@ static void __exit bnx2i_mod_exit(void)
}
mutex_unlock(&bnx2i_dev_lock);
cpu_notifier_register_begin();
get_online_cpus();
for_each_online_cpu(cpu)
bnx2i_percpu_thread_destroy(cpu);
__unregister_hotcpu_notifier(&bnx2i_cpu_notifier);
cpu_notifier_register_done();
cpuhp_remove_state_nocalls(bnx2i_online_state);
cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2I_DEAD);
put_online_cpus();
iscsi_unregister_transport(&bnx2i_iscsi_transport);
cnic_unregister_driver(CNIC_ULP_ISCSI);
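
Both SCSI conversions pair two states: a dynamically allocated one for the
online callback, and a statically reserved slot (CPUHP_SCSI_BNX2FC_DEAD /
CPUHP_SCSI_BNX2I_DEAD, added to the state enum further down) for the dead
callback. The split is needed because CPUHP_AP_ONLINE_DYN slots exist only in
the online section of the state space, while a CPU_DEAD-equivalent teardown
must run in the prepare/dead section, whose slots are reserved at compile
time. The bnx2fc init path above, condensed to show the shape:

        get_online_cpus();
        for_each_online_cpu(cpu)
                bnx2fc_percpu_thread_create(cpu);

        /* Online callback: any free dynamic slot will do. */
        rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                                       "scsi/bnx2fc:online",
                                       bnx2fc_cpu_online, NULL);
        if (rc < 0)
                goto stop_threads;
        bnx2fc_online_state = rc;

        /* Dead callback: needs the statically reserved slot. */
        cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead",
                                  NULL, bnx2fc_cpu_dead);
        put_online_cpus();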

View File

@ -1612,30 +1612,29 @@ static int qedi_percpu_io_thread(void *arg)
return 0;
}
static void qedi_percpu_thread_create(unsigned int cpu)
static int qedi_cpu_online(unsigned int cpu)
{
struct qedi_percpu_s *p;
struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
struct task_struct *thread;
p = &per_cpu(qedi_percpu, cpu);
thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
cpu_to_node(cpu),
"qedi_thread/%d", cpu);
if (likely(!IS_ERR(thread))) {
kthread_bind(thread, cpu);
p->iothread = thread;
wake_up_process(thread);
}
if (IS_ERR(thread))
return PTR_ERR(thread);
kthread_bind(thread, cpu);
p->iothread = thread;
wake_up_process(thread);
return 0;
}
static void qedi_percpu_thread_destroy(unsigned int cpu)
static int qedi_cpu_offline(unsigned int cpu)
{
struct qedi_percpu_s *p;
struct task_struct *thread;
struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
struct qedi_work *work, *tmp;
struct task_struct *thread;
p = &per_cpu(qedi_percpu, cpu);
spin_lock_bh(&p->p_work_lock);
thread = p->iothread;
p->iothread = NULL;
@ -1650,35 +1649,9 @@ static void qedi_percpu_thread_destroy(unsigned int cpu)
spin_unlock_bh(&p->p_work_lock);
if (thread)
kthread_stop(thread);
return 0;
}
static int qedi_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
QEDI_ERR(NULL, "CPU %d online.\n", cpu);
qedi_percpu_thread_create(cpu);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
QEDI_ERR(NULL, "CPU %d offline.\n", cpu);
qedi_percpu_thread_destroy(cpu);
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block qedi_cpu_notifier = {
.notifier_call = qedi_cpu_callback,
};
void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
{
struct qed_ll2_params params;
@ -2038,6 +2011,8 @@ static struct pci_device_id qedi_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
static enum cpuhp_state qedi_cpuhp_state;
static struct pci_driver qedi_pci_driver = {
.name = QEDI_MODULE_NAME,
.id_table = qedi_pci_tbl,
@ -2047,16 +2022,13 @@ static struct pci_driver qedi_pci_driver = {
static int __init qedi_init(void)
{
int rc = 0;
int ret;
struct qedi_percpu_s *p;
unsigned int cpu = 0;
int cpu, rc = 0;
qedi_ops = qed_get_iscsi_ops();
if (!qedi_ops) {
QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n");
rc = -EINVAL;
goto exit_qedi_init_0;
return -EINVAL;
}
#ifdef CONFIG_DEBUG_FS
@ -2070,15 +2042,6 @@ static int __init qedi_init(void)
goto exit_qedi_init_1;
}
register_hotcpu_notifier(&qedi_cpu_notifier);
ret = pci_register_driver(&qedi_pci_driver);
if (ret) {
QEDI_ERR(NULL, "Failed to register driver\n");
rc = -EINVAL;
goto exit_qedi_init_2;
}
for_each_possible_cpu(cpu) {
p = &per_cpu(qedi_percpu, cpu);
INIT_LIST_HEAD(&p->work_list);
@ -2086,11 +2049,22 @@ static int __init qedi_init(void)
p->iothread = NULL;
}
for_each_online_cpu(cpu)
qedi_percpu_thread_create(cpu);
rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/qedi:online",
qedi_cpu_online, qedi_cpu_offline);
if (rc < 0)
goto exit_qedi_init_2;
qedi_cpuhp_state = rc;
return rc;
rc = pci_register_driver(&qedi_pci_driver);
if (rc) {
QEDI_ERR(NULL, "Failed to register driver\n");
goto exit_qedi_hp;
}
return 0;
exit_qedi_hp:
cpuhp_remove_state(qedi_cpuhp_state);
exit_qedi_init_2:
iscsi_unregister_transport(&qedi_iscsi_transport);
exit_qedi_init_1:
@ -2098,19 +2072,13 @@ static int __init qedi_init(void)
qedi_dbg_exit();
#endif
qed_put_iscsi_ops();
exit_qedi_init_0:
return rc;
}
static void __exit qedi_cleanup(void)
{
unsigned int cpu = 0;
for_each_online_cpu(cpu)
qedi_percpu_thread_destroy(cpu);
pci_unregister_driver(&qedi_pci_driver);
unregister_hotcpu_notifier(&qedi_cpu_notifier);
cpuhp_remove_state(qedi_cpuhp_state);
iscsi_unregister_transport(&qedi_iscsi_transport);
#ifdef CONFIG_DEBUG_FS

View File

@ -967,48 +967,38 @@ cfs_cpt_table_create_pattern(char *pattern)
}
#ifdef CONFIG_HOTPLUG_CPU
static int
cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
static enum cpuhp_state lustre_cpu_online;
static void cfs_cpu_incr_cpt_version(void)
{
unsigned int cpu = (unsigned long)hcpu;
bool warn;
switch (action) {
case CPU_DEAD:
case CPU_DEAD_FROZEN:
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
spin_lock(&cpt_data.cpt_lock);
cpt_data.cpt_version++;
spin_unlock(&cpt_data.cpt_lock);
/* Fall through */
default:
if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) {
CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n",
cpu, action);
break;
}
mutex_lock(&cpt_data.cpt_mutex);
/* if all HTs in a core are offline, it may break affinity */
cpumask_copy(cpt_data.cpt_cpumask,
topology_sibling_cpumask(cpu));
warn = cpumask_any_and(cpt_data.cpt_cpumask,
cpu_online_mask) >= nr_cpu_ids;
mutex_unlock(&cpt_data.cpt_mutex);
CDEBUG(warn ? D_WARNING : D_INFO,
"Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u action: %lx]\n",
cpu, action);
}
return NOTIFY_OK;
spin_lock(&cpt_data.cpt_lock);
cpt_data.cpt_version++;
spin_unlock(&cpt_data.cpt_lock);
}
static struct notifier_block cfs_cpu_notifier = {
.notifier_call = cfs_cpu_notify,
.priority = 0
};
static int cfs_cpu_online(unsigned int cpu)
{
cfs_cpu_incr_cpt_version();
return 0;
}
static int cfs_cpu_dead(unsigned int cpu)
{
bool warn;
cfs_cpu_incr_cpt_version();
mutex_lock(&cpt_data.cpt_mutex);
/* if all HTs in a core are offline, it may break affinity */
cpumask_copy(cpt_data.cpt_cpumask, topology_sibling_cpumask(cpu));
warn = cpumask_any_and(cpt_data.cpt_cpumask,
cpu_online_mask) >= nr_cpu_ids;
mutex_unlock(&cpt_data.cpt_mutex);
CDEBUG(warn ? D_WARNING : D_INFO,
"Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u]\n",
cpu);
return 0;
}
#endif
void
@ -1018,7 +1008,9 @@ cfs_cpu_fini(void)
cfs_cpt_table_free(cfs_cpt_table);
#ifdef CONFIG_HOTPLUG_CPU
unregister_hotcpu_notifier(&cfs_cpu_notifier);
if (lustre_cpu_online > 0)
cpuhp_remove_state_nocalls(lustre_cpu_online);
cpuhp_remove_state_nocalls(CPUHP_LUSTRE_CFS_DEAD);
#endif
if (cpt_data.cpt_cpumask)
LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size());
@ -1027,6 +1019,8 @@ cfs_cpu_fini(void)
int
cfs_cpu_init(void)
{
int ret = 0;
LASSERT(!cfs_cpt_table);
memset(&cpt_data, 0, sizeof(cpt_data));
@ -1041,8 +1035,19 @@ cfs_cpu_init(void)
mutex_init(&cpt_data.cpt_mutex);
#ifdef CONFIG_HOTPLUG_CPU
register_hotcpu_notifier(&cfs_cpu_notifier);
ret = cpuhp_setup_state_nocalls(CPUHP_LUSTRE_CFS_DEAD,
"staging/lustre/cfe:dead", NULL,
cfs_cpu_dead);
if (ret < 0)
goto failed;
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"staging/lustre/cfe:online",
cfs_cpu_online, NULL);
if (ret < 0)
goto failed;
lustre_cpu_online = ret;
#endif
ret = -EINVAL;
if (*cpu_pattern) {
cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern);
@ -1075,7 +1080,7 @@ cfs_cpu_init(void)
failed:
cfs_cpu_fini();
return -1;
return ret;
}
#endif

View File

@ -445,7 +445,7 @@ int __init xen_evtchn_fifo_init(void)
evtchn_ops = &evtchn_ops_fifo;
cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
"CPUHP_XEN_EVTCHN_PREPARE",
"xen/evtchn:prepare",
xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
out:
put_cpu();

View File

@ -57,9 +57,6 @@ struct notifier_block;
#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
* lock is dropped */
@ -80,80 +77,14 @@ struct notifier_block;
#ifdef CONFIG_SMP
extern bool cpuhp_tasks_frozen;
/* Need to know about CPUs going up/down? */
#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
#define cpu_notifier(fn, pri) { \
static struct notifier_block fn##_nb = \
{ .notifier_call = fn, .priority = pri }; \
register_cpu_notifier(&fn##_nb); \
}
#define __cpu_notifier(fn, pri) { \
static struct notifier_block fn##_nb = \
{ .notifier_call = fn, .priority = pri }; \
__register_cpu_notifier(&fn##_nb); \
}
extern int register_cpu_notifier(struct notifier_block *nb);
extern int __register_cpu_notifier(struct notifier_block *nb);
extern void unregister_cpu_notifier(struct notifier_block *nb);
extern void __unregister_cpu_notifier(struct notifier_block *nb);
#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
static inline int register_cpu_notifier(struct notifier_block *nb)
{
return 0;
}
static inline int __register_cpu_notifier(struct notifier_block *nb)
{
return 0;
}
static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}
static inline void __unregister_cpu_notifier(struct notifier_block *nb)
{
}
#endif
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
#define cpu_notifier_register_begin cpu_maps_update_begin
#define cpu_notifier_register_done cpu_maps_update_done
#else /* CONFIG_SMP */
#define cpuhp_tasks_frozen 0
#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
static inline int register_cpu_notifier(struct notifier_block *nb)
{
return 0;
}
static inline int __register_cpu_notifier(struct notifier_block *nb)
{
return 0;
}
static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}
static inline void __unregister_cpu_notifier(struct notifier_block *nb)
{
}
static inline void cpu_maps_update_begin(void)
{
}
@ -162,14 +93,6 @@ static inline void cpu_maps_update_done(void)
{
}
static inline void cpu_notifier_register_begin(void)
{
}
static inline void cpu_notifier_register_done(void)
{
}
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
@ -182,12 +105,6 @@ extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
#define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
#define __register_hotcpu_notifier(nb) __register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
#define __unregister_hotcpu_notifier(nb) __unregister_cpu_notifier(nb)
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);
@ -199,13 +116,6 @@ static inline void cpu_hotplug_done(void) {}
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)
#define cpu_hotplug_enable() do { } while (0)
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
#define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
#define __register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
#define unregister_hotcpu_notifier(nb) ({ (void)(nb); })
#define __unregister_hotcpu_notifier(nb) ({ (void)(nb); })
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_PM_SLEEP_SMP

View File

@ -41,6 +41,9 @@ enum cpuhp_state {
CPUHP_NET_DEV_DEAD,
CPUHP_PCI_XGENE_DEAD,
CPUHP_IOMMU_INTEL_DEAD,
CPUHP_LUSTRE_CFS_DEAD,
CPUHP_SCSI_BNX2FC_DEAD,
CPUHP_SCSI_BNX2I_DEAD,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
@ -56,7 +59,6 @@ enum cpuhp_state {
CPUHP_POWERPC_MMU_CTX_PREPARE,
CPUHP_XEN_PREPARE,
CPUHP_XEN_EVTCHN_PREPARE,
CPUHP_NOTIFY_PREPARE,
CPUHP_ARM_SHMOBILE_SCU_PREPARE,
CPUHP_SH_SH3X_PREPARE,
CPUHP_BLK_MQ_PREPARE,
@ -71,7 +73,6 @@ enum cpuhp_state {
CPUHP_KVM_PPC_BOOK3S_PREPARE,
CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_DEAD,
CPUHP_NOTF_ERR_INJ_PREPARE,
CPUHP_MIPS_SOC_PREPARE,
CPUHP_BRINGUP_CPU,
CPUHP_AP_IDLE_DEAD,
@ -79,10 +80,8 @@ enum cpuhp_state {
CPUHP_AP_SCHED_STARTING,
CPUHP_AP_RCUTREE_DYING,
CPUHP_AP_IRQ_GIC_STARTING,
CPUHP_AP_IRQ_GICV3_STARTING,
CPUHP_AP_IRQ_HIP04_STARTING,
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_PERF_X86_UNCORE_STARTING,
@ -118,7 +117,6 @@ enum cpuhp_state {
CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING,
CPUHP_AP_ARM_CORESIGHT4_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,
CPUHP_AP_SMPCFD_DYING,
CPUHP_AP_X86_TBOOT_DYING,
@ -142,7 +140,6 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_NOTIFY_ONLINE,
CPUHP_AP_ONLINE_DYN,
CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
CPUHP_AP_X86_HPET_ONLINE,

View File

@ -183,23 +183,16 @@ EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
/*
* The following two APIs (cpu_maps_update_begin/done) must be used when
* attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
* The APIs cpu_notifier_register_begin/done() must be used to protect CPU
* hotplug callback (un)registration performed using __register_cpu_notifier()
* or __unregister_cpu_notifier().
*/
void cpu_maps_update_begin(void)
{
mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);
void cpu_maps_update_done(void)
{
mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
static RAW_NOTIFIER_HEAD(cpu_chain);
/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
* Should always be manipulated under cpu_add_remove_lock
@ -349,66 +342,7 @@ void cpu_hotplug_enable(void)
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif /* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
int ret;
cpu_maps_update_begin();
ret = raw_notifier_chain_register(&cpu_chain, nb);
cpu_maps_update_done();
return ret;
}
int __register_cpu_notifier(struct notifier_block *nb)
{
return raw_notifier_chain_register(&cpu_chain, nb);
}
static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
int *nr_calls)
{
unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
void *hcpu = (void *)(long)cpu;
int ret;
ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
nr_calls);
return notifier_to_errno(ret);
}
static int cpu_notify(unsigned long val, unsigned int cpu)
{
return __cpu_notify(val, cpu, -1, NULL);
}
static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
BUG_ON(cpu_notify(val, cpu));
}
/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
int nr_calls = 0;
int ret;
ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
if (ret) {
nr_calls--;
printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
__func__, cpu);
__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
}
return ret;
}
static int notify_online(unsigned int cpu)
{
cpu_notify(CPU_ONLINE, cpu);
return 0;
}
static int bringup_wait_for_ap(unsigned int cpu)
{
@ -433,10 +367,8 @@ static int bringup_cpu(unsigned int cpu)
/* Arch-specific enabling code. */
ret = __cpu_up(cpu, idle);
irq_unlock_sparse();
if (ret) {
cpu_notify(CPU_UP_CANCELED, cpu);
if (ret)
return ret;
}
ret = bringup_wait_for_ap(cpu);
BUG_ON(!cpu_online(cpu));
return ret;
@ -565,11 +497,6 @@ static void cpuhp_thread_fun(unsigned int cpu)
BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
undo_cpu_down(cpu, st);
/*
* This is a momentary workaround to keep the notifier users
* happy. Will go away once we got rid of the notifiers.
*/
cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
st->rollback = false;
} else {
/* Cannot happen .... */
@ -659,22 +586,6 @@ void __init cpuhp_threads_init(void)
kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
{
cpu_maps_update_begin();
raw_notifier_chain_unregister(&cpu_chain, nb);
cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
void __unregister_cpu_notifier(struct notifier_block *nb)
{
raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
#ifdef CONFIG_HOTPLUG_CPU
/**
* clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
@ -741,20 +652,6 @@ static inline void check_for_tasks(int dead_cpu)
read_unlock(&tasklist_lock);
}
static int notify_down_prepare(unsigned int cpu)
{
int err, nr_calls = 0;
err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
if (err) {
nr_calls--;
__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
pr_warn("%s: attempt to take down CPU %u failed\n",
__func__, cpu);
}
return err;
}
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
@ -833,13 +730,6 @@ static int takedown_cpu(unsigned int cpu)
return 0;
}
static int notify_dead(unsigned int cpu)
{
cpu_notify_nofail(CPU_DEAD, cpu);
check_for_tasks(cpu);
return 0;
}
static void cpuhp_complete_idle_dead(void *arg)
{
struct cpuhp_cpu_state *st = arg;
@ -863,9 +753,7 @@ void cpuhp_report_idle_dead(void)
}
#else
#define notify_down_prepare NULL
#define takedown_cpu NULL
#define notify_dead NULL
#endif
#ifdef CONFIG_HOTPLUG_CPU
@ -924,9 +812,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
cpu_hotplug_done();
/* This post dead nonsense must die */
if (!ret && hasdied)
cpu_notify_nofail(CPU_POST_DEAD, cpu);
return ret;
}
@ -1291,17 +1176,6 @@ static struct cpuhp_step cpuhp_bp_states[] = {
.startup.single = rcutree_prepare_cpu,
.teardown.single = rcutree_dead_cpu,
},
/*
* Preparatory and dead notifiers. Will be replaced once the notifiers
* are converted to states.
*/
[CPUHP_NOTIFY_PREPARE] = {
.name = "notify:prepare",
.startup.single = notify_prepare,
.teardown.single = notify_dead,
.skip_onerr = true,
.cant_stop = true,
},
/*
* On the tear-down path, timers_dead_cpu() must be invoked
* before blk_mq_queue_reinit_notify() from notify_dead(),
@ -1391,17 +1265,6 @@ static struct cpuhp_step cpuhp_ap_states[] = {
.startup.single = rcutree_online_cpu,
.teardown.single = rcutree_offline_cpu,
},
/*
* Online/down_prepare notifiers. Will be removed once the notifiers
* are converted to states.
*/
[CPUHP_AP_NOTIFY_ONLINE] = {
.name = "notify:online",
.startup.single = notify_online,
.teardown.single = notify_down_prepare,
.skip_onerr = true,
},
#endif
/*
* The dynamically registered state space is here
@ -1432,23 +1295,53 @@ static int cpuhp_cb_check(enum cpuhp_state state)
return 0;
}
static void cpuhp_store_callbacks(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu),
bool multi_instance)
/*
* Returns a free for dynamic slot assignment of the Online state. The states
* are protected by the cpuhp_slot_states mutex and an empty slot is identified
* by having no name assigned.
*/
static int cpuhp_reserve_state(enum cpuhp_state state)
{
enum cpuhp_state i;
for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
if (!cpuhp_ap_states[i].name)
return i;
}
WARN(1, "No more dynamic states available for CPU hotplug\n");
return -ENOSPC;
}
static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu),
bool multi_instance)
{
/* (Un)Install the callbacks for further cpu hotplug operations */
struct cpuhp_step *sp;
int ret = 0;
mutex_lock(&cpuhp_state_mutex);
if (state == CPUHP_AP_ONLINE_DYN) {
ret = cpuhp_reserve_state(state);
if (ret < 0)
goto out;
state = ret;
}
sp = cpuhp_get_step(state);
if (name && sp->name) {
ret = -EBUSY;
goto out;
}
sp->startup.single = startup;
sp->teardown.single = teardown;
sp->name = name;
sp->multi_instance = multi_instance;
INIT_HLIST_HEAD(&sp->list);
out:
mutex_unlock(&cpuhp_state_mutex);
return ret;
}
static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
@ -1509,29 +1402,6 @@ static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
}
}
/*
* Returns a free for dynamic slot assignment of the Online state. The states
* are protected by the cpuhp_slot_states mutex and an empty slot is identified
* by having no name assigned.
*/
static int cpuhp_reserve_state(enum cpuhp_state state)
{
enum cpuhp_state i;
mutex_lock(&cpuhp_state_mutex);
for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
if (cpuhp_ap_states[i].name)
continue;
cpuhp_ap_states[i].name = "Reserved";
mutex_unlock(&cpuhp_state_mutex);
return i;
}
mutex_unlock(&cpuhp_state_mutex);
WARN(1, "No more dynamic states available for CPU hotplug\n");
return -ENOSPC;
}
int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
bool invoke)
{
@ -1580,11 +1450,13 @@ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
/**
* __cpuhp_setup_state - Setup the callbacks for an hotplug machine state
* @state: The state to setup
* @invoke: If true, the startup function is invoked for cpus where
* cpu state >= @state
* @startup: startup callback function
* @teardown: teardown callback function
* @state: The state to setup
* @invoke: If true, the startup function is invoked for cpus where
* cpu state >= @state
* @startup: startup callback function
* @teardown: teardown callback function
* @multi_instance: State is set up for multiple instances which get
* added afterwards.
*
* Returns:
* On success:
@ -1599,25 +1471,16 @@ int __cpuhp_setup_state(enum cpuhp_state state,
bool multi_instance)
{
int cpu, ret = 0;
int dyn_state = 0;
if (cpuhp_cb_check(state) || !name)
return -EINVAL;
get_online_cpus();
/* currently assignments for the ONLINE state are possible */
if (state == CPUHP_AP_ONLINE_DYN) {
dyn_state = 1;
ret = cpuhp_reserve_state(state);
if (ret < 0)
goto out;
state = ret;
}
ret = cpuhp_store_callbacks(state, name, startup, teardown,
multi_instance);
cpuhp_store_callbacks(state, name, startup, teardown, multi_instance);
if (!invoke || !startup)
if (ret || !invoke || !startup)
goto out;
/*
@ -1641,7 +1504,11 @@ int __cpuhp_setup_state(enum cpuhp_state state,
}
out:
put_online_cpus();
if (!ret && dyn_state)
/*
* If the requested state is CPUHP_AP_ONLINE_DYN, return the
* dynamically allocated state in case of success.
*/
if (!ret && state == CPUHP_AP_ONLINE_DYN)
return state;
return ret;
}
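
Two behavioural changes in this file are worth calling out.
cpuhp_reserve_state() is now invoked under cpuhp_state_mutex from within
cpuhp_store_callbacks(), closing the window between reserving a dynamic slot
and filling it. And cpuhp_store_callbacks() refuses to overwrite an occupied
named state, so claiming a slot that is already in use fails with -EBUSY
instead of silently replacing the previous owner's callbacks. That guarantee
is what makes the state-space consolidation above safe. A sketch of the new
failure mode (the second caller and its callbacks are hypothetical):

        /* First owner of the static slot succeeds: */
        rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
                                       "x86/xen/hvm_guest:prepare",
                                       xen_cpu_up_prepare, xen_cpu_dead);

        /* A second caller reusing the same slot now fails: */
        rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
                                       "other/driver:prepare",
                                       other_prepare, NULL);
        /* rc == -EBUSY; the original callbacks stay installed. */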

View File

@ -1538,30 +1538,6 @@ config NOTIFIER_ERROR_INJECTION
Say N if unsure.
config CPU_NOTIFIER_ERROR_INJECT
tristate "CPU notifier error injection module"
depends on HOTPLUG_CPU && NOTIFIER_ERROR_INJECTION
help
This option provides a kernel module that can be used to test
the error handling of the cpu notifiers by injecting artificial
errors to CPU notifier chain callbacks. It is controlled through
debugfs interface under /sys/kernel/debug/notifier-error-inject/cpu
If the notifier call chain should be failed with some events
notified, write the error code to "actions/<notifier event>/error".
Example: Inject CPU offline error (-1 == -EPERM)
# cd /sys/kernel/debug/notifier-error-inject/cpu
# echo -1 > actions/CPU_DOWN_PREPARE/error
# echo 0 > /sys/devices/system/cpu/cpu1/online
bash: echo: write error: Operation not permitted
To compile this code as a module, choose M here: the module will
be called cpu-notifier-error-inject.
If unsure, say N.
config PM_NOTIFIER_ERROR_INJECT
tristate "PM notifier error injection module"
depends on PM && NOTIFIER_ERROR_INJECTION

View File

@ -128,7 +128,6 @@ obj-$(CONFIG_SWIOTLB) += swiotlb.o
obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o iommu-common.o
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o
obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o
obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o

View File

@ -1,84 +0,0 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include "notifier-error-inject.h"
static int priority;
module_param(priority, int, 0);
MODULE_PARM_DESC(priority, "specify cpu notifier priority");
#define UP_PREPARE 0
#define UP_PREPARE_FROZEN 0
#define DOWN_PREPARE 0
#define DOWN_PREPARE_FROZEN 0
static struct notifier_err_inject cpu_notifier_err_inject = {
.actions = {
{ NOTIFIER_ERR_INJECT_ACTION(UP_PREPARE) },
{ NOTIFIER_ERR_INJECT_ACTION(UP_PREPARE_FROZEN) },
{ NOTIFIER_ERR_INJECT_ACTION(DOWN_PREPARE) },
{ NOTIFIER_ERR_INJECT_ACTION(DOWN_PREPARE_FROZEN) },
{}
}
};
static int notf_err_handle(struct notifier_err_inject_action *action)
{
int ret;
ret = action->error;
if (ret)
pr_info("Injecting error (%d) to %s\n", ret, action->name);
return ret;
}
static int notf_err_inj_up_prepare(unsigned int cpu)
{
if (!cpuhp_tasks_frozen)
return notf_err_handle(&cpu_notifier_err_inject.actions[0]);
else
return notf_err_handle(&cpu_notifier_err_inject.actions[1]);
}
static int notf_err_inj_dead(unsigned int cpu)
{
if (!cpuhp_tasks_frozen)
return notf_err_handle(&cpu_notifier_err_inject.actions[2]);
else
return notf_err_handle(&cpu_notifier_err_inject.actions[3]);
}
static struct dentry *dir;
static int err_inject_init(void)
{
int err;
dir = notifier_err_inject_init("cpu", notifier_err_inject_dir,
&cpu_notifier_err_inject, priority);
if (IS_ERR(dir))
return PTR_ERR(dir);
err = cpuhp_setup_state_nocalls(CPUHP_NOTF_ERR_INJ_PREPARE,
"cpu-err-notif:prepare",
notf_err_inj_up_prepare,
notf_err_inj_dead);
if (err)
debugfs_remove_recursive(dir);
return err;
}
static void err_inject_exit(void)
{
cpuhp_remove_state_nocalls(CPUHP_NOTF_ERR_INJ_PREPARE);
debugfs_remove_recursive(dir);
}
module_init(err_inject_init);
module_exit(err_inject_exit);
MODULE_DESCRIPTION("CPU notifier error injection module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");

View File

@ -456,7 +456,7 @@ int kvm_timer_hyp_init(void)
kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
"AP_KVM_ARM_TIMER_STARTING", kvm_timer_starting_cpu,
"kvm/arm/timer:starting", kvm_timer_starting_cpu,
kvm_timer_dying_cpu);
return err;
}

View File

@ -428,7 +428,7 @@ int kvm_vgic_hyp_init(void)
}
ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
"AP_KVM_ARM_VGIC_INIT_STARTING",
"kvm/arm/vgic:starting",
vgic_init_cpu_starting, vgic_init_cpu_dying);
if (ret) {
kvm_err("Cannot register vgic CPU notifier\n");

View File

@ -3944,7 +3944,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
goto out_free_1;
}
r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "AP_KVM_STARTING",
r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
kvm_starting_cpu, kvm_dying_cpu);
if (r)
goto out_free_2;