|
18 | 18 | #include <linux/sched.h> |
19 | 19 | #include <linux/sched/rt.h> |
20 | 20 | #include <linux/sched/task.h> |
| 21 | +#include <linux/sched/isolation.h> |
21 | 22 | #include <uapi/linux/sched/types.h> |
22 | 23 | #include <linux/task_work.h> |
23 | 24 |
|
@@ -217,7 +218,45 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, |
217 | 218 | if (!chip || !chip->irq_set_affinity) |
218 | 219 | return -EINVAL; |
219 | 220 |
|
220 | | - ret = chip->irq_set_affinity(data, mask, force); |
| 221 | + /* |
| 222 | + * If this is a managed interrupt and housekeeping is enabled on |
| 223 | + * it, check whether the requested affinity mask intersects with |
| 224 | + * a housekeeping CPU. If so, then remove the isolated CPUs from |
| 225 | + * the mask and just keep the housekeeping CPU(s). This prevents |
| 226 | + * the affinity setter from routing the interrupt to an isolated |
| 227 | + * CPU, so that I/O submitted from a housekeeping CPU does not |
| 228 | + * cause interrupts on an isolated one. |
| 229 | + * |
| 230 | + * If the masks do not intersect or include online CPU(s) then |
| 231 | + * keep the requested mask. The isolated target CPUs are only |
| 232 | + * receiving interrupts when the I/O operation was submitted |
| 233 | + * directly from them. |
| 234 | + * |
| 235 | + * If all housekeeping CPUs in the affinity mask are offline, the |
| 236 | + * interrupt will be migrated by the CPU hotplug code once a |
| 237 | + * housekeeping CPU which belongs to the affinity mask comes |
| 238 | + * online. |
| 239 | + */ |
| 240 | + if (irqd_affinity_is_managed(data) && |
| 241 | + housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) { |
| 242 | + const struct cpumask *hk_mask, *prog_mask; |
| 243 | + |
| 244 | + static DEFINE_RAW_SPINLOCK(tmp_mask_lock); |
| 245 | + static struct cpumask tmp_mask; |
| 246 | + |
| 247 | + hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ); |
| 248 | + |
| 249 | + raw_spin_lock(&tmp_mask_lock); |
| 250 | + cpumask_and(&tmp_mask, mask, hk_mask); |
| 251 | + if (!cpumask_intersects(&tmp_mask, cpu_online_mask)) |
| 252 | + prog_mask = mask; |
| 253 | + else |
| 254 | + prog_mask = &tmp_mask; |
| 255 | + ret = chip->irq_set_affinity(data, prog_mask, force); |
| 256 | + raw_spin_unlock(&tmp_mask_lock); |
| 257 | + } else { |
| 258 | + ret = chip->irq_set_affinity(data, mask, force); |
| 259 | + } |
221 | 260 | switch (ret) { |
222 | 261 | case IRQ_SET_MASK_OK: |
223 | 262 | case IRQ_SET_MASK_OK_DONE: |
|
0 commit comments