|
3 | 3 | #include <linux/memregion.h> |
4 | 4 | #include <linux/module.h> |
5 | 5 | #include <linux/dax.h> |
| 6 | +#include <cxl/cxl.h> |
6 | 7 | #include "../bus.h" |
7 | 8 |
|
8 | 9 | static bool region_idle; |
@@ -58,6 +59,23 @@ static void release_hmem(void *pdev) |
58 | 59 | platform_device_unregister(pdev); |
59 | 60 | } |
60 | 61 |
|
/*
 * Deferred-probe bookkeeping: pins the hmem platform device so a
 * workqueue callback can re-scan soft-reserved ranges after the CXL
 * drivers have had a chance to load and claim them.
 */
struct dax_defer_work {
	/* hmem platform device; refcounted via get_device() at probe time */
	struct platform_device *pdev;
	struct work_struct work;
};

static void process_defer_work(struct work_struct *w);

/* Single file-scope instance; ->pdev is filled in at platform probe. */
static struct dax_defer_work dax_hmem_work = {
	.work = __WORK_INITIALIZER(dax_hmem_work.work, process_defer_work),
};

/*
 * dax_hmem_flush_work() - synchronously wait for any queued deferred
 * hmem re-registration work to finish.
 *
 * Exported for external callers — presumably the CXL side, so it can
 * order itself against deferred hmem registration (NOTE(review):
 * confirm against the actual importer).
 */
void dax_hmem_flush_work(void)
{
	flush_work(&dax_hmem_work.work);
}
EXPORT_SYMBOL_GPL(dax_hmem_flush_work);
61 | 79 | static int __hmem_register_device(struct device *host, int target_nid, |
62 | 80 | const struct resource *res) |
63 | 81 | { |
@@ -122,15 +140,66 @@ static int hmem_register_device(struct device *host, int target_nid, |
122 | 140 | if (IS_ENABLED(CONFIG_DEV_DAX_CXL) && |
123 | 141 | region_intersects(res->start, resource_size(res), IORESOURCE_MEM, |
124 | 142 | IORES_DESC_CXL) != REGION_DISJOINT) { |
| 143 | + if (!dax_hmem_initial_probe) { |
| 144 | + dev_dbg(host, "await CXL initial probe: %pr\n", res); |
| 145 | + queue_work(system_long_wq, &dax_hmem_work.work); |
| 146 | + return 0; |
| 147 | + } |
125 | 148 | dev_dbg(host, "deferring range to CXL: %pr\n", res); |
126 | 149 | return 0; |
127 | 150 | } |
128 | 151 |
|
129 | 152 | return __hmem_register_device(host, target_nid, res); |
130 | 153 | } |
131 | 154 |
|
| 155 | +static int hmem_register_cxl_device(struct device *host, int target_nid, |
| 156 | + const struct resource *res) |
| 157 | +{ |
| 158 | + if (region_intersects(res->start, resource_size(res), IORESOURCE_MEM, |
| 159 | + IORES_DESC_CXL) == REGION_DISJOINT) |
| 160 | + return 0; |
| 161 | + |
| 162 | + if (cxl_region_contains_resource((struct resource *)res)) { |
| 163 | + dev_dbg(host, "CXL claims resource, dropping: %pr\n", res); |
| 164 | + return 0; |
| 165 | + } |
| 166 | + |
| 167 | + dev_dbg(host, "CXL did not claim resource, registering: %pr\n", res); |
| 168 | + return __hmem_register_device(host, target_nid, res); |
| 169 | +} |
| 170 | + |
/*
 * Workqueue callback: after giving the CXL drivers an opportunity to
 * probe, re-walk the hmem resources and register any range that CXL
 * did not claim. Runs at most once (gated by dax_hmem_initial_probe).
 */
static void process_defer_work(struct work_struct *w)
{
	struct dax_defer_work *work = container_of(w, typeof(*work), work);
	struct platform_device *pdev;

	/* No hmem platform device was ever pinned; nothing to re-scan */
	if (!work->pdev)
		return;

	pdev = work->pdev;

	/* Relies on cxl_acpi and cxl_pci having had a chance to load */
	wait_for_device_probe();

	/*
	 * Scoped device guard for the rest of the function — presumably
	 * the device lock, so ->driver stays stable while we walk
	 * (NOTE(review): confirm guard(device) maps to device_lock).
	 */
	guard(device)(&pdev->dev);
	if (!pdev->dev.driver)
		return;

	/* First (and only) CXL-aware pass over the hmem resources */
	if (!dax_hmem_initial_probe) {
		dax_hmem_initial_probe = true;
		walk_hmem_resources(&pdev->dev, hmem_register_cxl_device);
	}
}
| 193 | + |
132 | 194 | static int dax_hmem_platform_probe(struct platform_device *pdev) |
133 | 195 | { |
| 196 | + if (work_pending(&dax_hmem_work.work)) |
| 197 | + return -EBUSY; |
| 198 | + |
| 199 | + if (!dax_hmem_work.pdev) |
| 200 | + dax_hmem_work.pdev = |
| 201 | + to_platform_device(get_device(&pdev->dev)); |
| 202 | + |
134 | 203 | return walk_hmem_resources(&pdev->dev, hmem_register_device); |
135 | 204 | } |
136 | 205 |
|
@@ -168,6 +237,11 @@ static __init int dax_hmem_init(void) |
168 | 237 |
|
169 | 238 | static __exit void dax_hmem_exit(void) |
170 | 239 | { |
| 240 | + if (dax_hmem_work.pdev) { |
| 241 | + flush_work(&dax_hmem_work.work); |
| 242 | + put_device(&dax_hmem_work.pdev->dev); |
| 243 | + } |
| 244 | + |
171 | 245 | platform_driver_unregister(&dax_hmem_driver); |
172 | 246 | platform_driver_unregister(&dax_hmem_platform_driver); |
173 | 247 | } |
|
0 commit comments