@@ -2757,46 +2757,6 @@ static ssize_t delete_region_store(struct device *dev,
27572757}
27582758DEVICE_ATTR_WO (delete_region );
27592759
2760- static void cxl_pmem_region_release (struct device * dev )
2761- {
2762- struct cxl_pmem_region * cxlr_pmem = to_cxl_pmem_region (dev );
2763- int i ;
2764-
2765- for (i = 0 ; i < cxlr_pmem -> nr_mappings ; i ++ ) {
2766- struct cxl_memdev * cxlmd = cxlr_pmem -> mapping [i ].cxlmd ;
2767-
2768- put_device (& cxlmd -> dev );
2769- }
2770-
2771- kfree (cxlr_pmem );
2772- }
2773-
/* Only the common cxl_base attributes are exposed on pmem region devices */
static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

/* Device type that identifies cxl_pmem_region devices on the CXL bus */
const struct device_type cxl_pmem_region_type = {
	.name = "cxl_pmem_region",
	.release = cxl_pmem_region_release,
	.groups = cxl_pmem_region_attribute_groups,
};
2784-
2785- bool is_cxl_pmem_region (struct device * dev )
2786- {
2787- return dev -> type == & cxl_pmem_region_type ;
2788- }
2789- EXPORT_SYMBOL_NS_GPL (is_cxl_pmem_region , "CXL" );
2790-
2791- struct cxl_pmem_region * to_cxl_pmem_region (struct device * dev )
2792- {
2793- if (dev_WARN_ONCE (dev , !is_cxl_pmem_region (dev ),
2794- "not a cxl_pmem_region device\n" ))
2795- return NULL ;
2796- return container_of (dev , struct cxl_pmem_region , dev );
2797- }
2798- EXPORT_SYMBOL_NS_GPL (to_cxl_pmem_region , "CXL" );
2799-
28002760struct cxl_poison_context {
28012761 struct cxl_port * port ;
28022762 int part ;
@@ -3450,64 +3410,6 @@ static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
34503410 return - ENXIO ;
34513411}
34523412
/* Lockdep class for the embedded device mutex of cxl_pmem_region devices */
static struct lock_class_key cxl_pmem_region_key;

/*
 * Allocate and initialize (but do not register) a cxl_pmem_region for
 * @cxlr, snapshotting the committed region configuration.
 *
 * On success the new object is stashed in cxlr->cxlr_pmem and the nvdimm
 * bridge reference in cxlr->cxl_nvb; the caller is responsible for
 * device_add() and eventual device_unregister()/put_device().
 *
 * Returns 0 on success, -ENXIO if the region is not committed, -ENOMEM on
 * allocation failure, or -ENODEV if no nvdimm bridge is found.
 */
static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *dev;
	int i;

	/* Hold the region config stable while it is snapshotted below */
	guard(rwsem_read)(&cxl_rwsem.region);
	if (p->state != CXL_CONFIG_COMMIT)
		return -ENXIO;

	/*
	 * Zeroed allocation sized for nr_targets trailing 'mapping' entries;
	 * __free(kfree) auto-frees on the error returns below until
	 * ownership is transferred via no_free_ptr().
	 */
	struct cxl_pmem_region *cxlr_pmem __free(kfree) =
		kzalloc_flex(*cxlr_pmem, mapping, p->nr_targets);
	if (!cxlr_pmem)
		return -ENOMEM;

	cxlr_pmem->hpa_range.start = p->res->start;
	cxlr_pmem->hpa_range.end = p->res->end;

	/* Snapshot the region configuration underneath the cxl_rwsem.region */
	cxlr_pmem->nr_mappings = p->nr_targets;
	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];

		/*
		 * Regions never span CXL root devices, so by definition the
		 * bridge for one device is the same for all.
		 */
		if (i == 0) {
			cxl_nvb = cxl_find_nvdimm_bridge(cxlmd->endpoint);
			if (!cxl_nvb)
				return -ENODEV;
			cxlr->cxl_nvb = cxl_nvb;
		}
		/* Pin the memdev; released in cxl_pmem_region_release() */
		m->cxlmd = cxlmd;
		get_device(&cxlmd->dev);
		m->start = cxled->dpa_res->start;
		m->size = resource_size(cxled->dpa_res);
		m->position = i;
	}

	dev = &cxlr_pmem->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
	device_set_pm_not_required(dev);
	dev->parent = &cxlr->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_pmem_region_type;
	cxlr_pmem->cxlr = cxlr;
	/* Transfer ownership out of the __free(kfree) scope guard */
	cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem);

	return 0;
}
3510-
35113413static void cxl_dax_region_release (struct device * dev )
35123414{
35133415 struct cxl_dax_region * cxlr_dax = to_cxl_dax_region (dev );
@@ -3571,92 +3473,6 @@ static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
35713473 return cxlr_dax ;
35723474}
35733475
3574- static void cxlr_pmem_unregister (void * _cxlr_pmem )
3575- {
3576- struct cxl_pmem_region * cxlr_pmem = _cxlr_pmem ;
3577- struct cxl_region * cxlr = cxlr_pmem -> cxlr ;
3578- struct cxl_nvdimm_bridge * cxl_nvb = cxlr -> cxl_nvb ;
3579-
3580- /*
3581- * Either the bridge is in ->remove() context under the device_lock(),
3582- * or cxlr_release_nvdimm() is cancelling the bridge's release action
3583- * for @cxlr_pmem and doing it itself (while manually holding the bridge
3584- * lock).
3585- */
3586- device_lock_assert (& cxl_nvb -> dev );
3587- cxlr -> cxlr_pmem = NULL ;
3588- cxlr_pmem -> cxlr = NULL ;
3589- device_unregister (& cxlr_pmem -> dev );
3590- }
3591-
3592- static void cxlr_release_nvdimm (void * _cxlr )
3593- {
3594- struct cxl_region * cxlr = _cxlr ;
3595- struct cxl_nvdimm_bridge * cxl_nvb = cxlr -> cxl_nvb ;
3596-
3597- scoped_guard (device , & cxl_nvb -> dev ) {
3598- if (cxlr -> cxlr_pmem )
3599- devm_release_action (& cxl_nvb -> dev , cxlr_pmem_unregister ,
3600- cxlr -> cxlr_pmem );
3601- }
3602- cxlr -> cxl_nvb = NULL ;
3603- put_device (& cxl_nvb -> dev );
3604- }
3605-
/**
 * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
 * @cxlr: parent CXL region for this pmem region bridge device
 *
 * Return: 0 on success negative error code on failure.
 */
static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
{
	struct cxl_pmem_region *cxlr_pmem;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *dev;
	int rc;

	rc = cxl_pmem_region_alloc(cxlr);
	if (rc)
		return rc;
	/* cxl_pmem_region_alloc() stashed both pointers on success */
	cxlr_pmem = cxlr->cxlr_pmem;
	cxl_nvb = cxlr->cxl_nvb;

	dev = &cxlr_pmem->dev;
	rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
		dev_name(dev));

	/*
	 * Under the bridge device_lock(), arrange for the pmem region to be
	 * unregistered when the bridge driver unbinds -- but only if the
	 * bridge is currently bound; otherwise fail with -ENXIO.
	 */
	scoped_guard(device, &cxl_nvb->dev) {
		if (cxl_nvb->dev.driver)
			rc = devm_add_action_or_reset(&cxl_nvb->dev,
						      cxlr_pmem_unregister,
						      cxlr_pmem);
		else
			rc = -ENXIO;
	}

	if (rc)
		goto err_bridge;

	/* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */
	return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr);

err:
	/* Drops the initial device_initialize() reference on the new device */
	put_device(dev);
err_bridge:
	/* Release the bridge reference taken in cxl_pmem_region_alloc() */
	put_device(&cxl_nvb->dev);
	cxlr->cxl_nvb = NULL;
	return rc;
}
3659-
36603476static void cxlr_dax_unregister (void * _cxlr_dax )
36613477{
36623478 struct cxl_dax_region * cxlr_dax = _cxlr_dax ;
0 commit comments