1414#include <linux/pci-epf.h>
1515#include <linux/msi.h>
1616#include <linux/bitfield.h>
17+ #include <linux/sizes.h>
1718
1819#include "dw-edma-core.h"
1920
20- #define DW_PCIE_VSEC_DMA_ID 0x6
21- #define DW_PCIE_VSEC_DMA_BAR GENMASK(10, 8)
22- #define DW_PCIE_VSEC_DMA_MAP GENMASK(2, 0)
23- #define DW_PCIE_VSEC_DMA_WR_CH GENMASK(9, 0)
24- #define DW_PCIE_VSEC_DMA_RD_CH GENMASK(25, 16)
21+ /* Synopsys */
22+ #define DW_PCIE_SYNOPSYS_VSEC_DMA_ID 0x6
23+ #define DW_PCIE_SYNOPSYS_VSEC_DMA_BAR GENMASK(10, 8)
24+ #define DW_PCIE_SYNOPSYS_VSEC_DMA_MAP GENMASK(2, 0)
25+ #define DW_PCIE_SYNOPSYS_VSEC_DMA_WR_CH GENMASK(9, 0)
26+ #define DW_PCIE_SYNOPSYS_VSEC_DMA_RD_CH GENMASK(25, 16)
27+
28+ /* AMD MDB (Xilinx) specific defines */
29+ #define PCI_DEVICE_ID_XILINX_B054 0xb054
30+
31+ #define DW_PCIE_XILINX_MDB_VSEC_DMA_ID 0x6
32+ #define DW_PCIE_XILINX_MDB_VSEC_ID 0x20
33+ #define DW_PCIE_XILINX_MDB_VSEC_DMA_BAR GENMASK(10, 8)
34+ #define DW_PCIE_XILINX_MDB_VSEC_DMA_MAP GENMASK(2, 0)
35+ #define DW_PCIE_XILINX_MDB_VSEC_DMA_WR_CH GENMASK(9, 0)
36+ #define DW_PCIE_XILINX_MDB_VSEC_DMA_RD_CH GENMASK(25, 16)
37+
38+ #define DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_HIGH 0xc
39+ #define DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_LOW 0x8
40+ #define DW_PCIE_XILINX_MDB_INVALID_ADDR (~0ULL)
41+
42+ #define DW_PCIE_XILINX_MDB_LL_OFF_GAP 0x200000
43+ #define DW_PCIE_XILINX_MDB_LL_SIZE 0x800
44+ #define DW_PCIE_XILINX_MDB_DT_OFF_GAP 0x100000
45+ #define DW_PCIE_XILINX_MDB_DT_SIZE 0x800
2546
2647#define DW_BLOCK (a , b , c ) \
2748 { \
@@ -50,6 +71,7 @@ struct dw_edma_pcie_data {
5071 u8 irqs ;
5172 u16 wr_ch_cnt ;
5273 u16 rd_ch_cnt ;
74+ u64 devmem_phys_off ;
5375};
5476
5577static const struct dw_edma_pcie_data snps_edda_data = {
@@ -90,6 +112,64 @@ static const struct dw_edma_pcie_data snps_edda_data = {
90112 .rd_ch_cnt = 2 ,
91113};
92114
115+ static const struct dw_edma_pcie_data xilinx_mdb_data = {
116+ /* MDB registers location */
117+ .rg .bar = BAR_0 ,
118+ .rg .off = SZ_4K , /* 4 Kbytes */
119+ .rg .sz = SZ_8K , /* 8 Kbytes */
120+
121+ /* Other */
122+ .mf = EDMA_MF_HDMA_NATIVE ,
123+ .irqs = 1 ,
124+ .wr_ch_cnt = 8 ,
125+ .rd_ch_cnt = 8 ,
126+ };
127+
128+ static void dw_edma_set_chan_region_offset (struct dw_edma_pcie_data * pdata ,
129+ enum pci_barno bar , off_t start_off ,
130+ off_t ll_off_gap , size_t ll_size ,
131+ off_t dt_off_gap , size_t dt_size )
132+ {
133+ u16 wr_ch = pdata -> wr_ch_cnt ;
134+ u16 rd_ch = pdata -> rd_ch_cnt ;
135+ off_t off ;
136+ u16 i ;
137+
138+ off = start_off ;
139+
140+ /* Write channel LL region */
141+ for (i = 0 ; i < wr_ch ; i ++ ) {
142+ pdata -> ll_wr [i ].bar = bar ;
143+ pdata -> ll_wr [i ].off = off ;
144+ pdata -> ll_wr [i ].sz = ll_size ;
145+ off += ll_off_gap ;
146+ }
147+
148+ /* Read channel LL region */
149+ for (i = 0 ; i < rd_ch ; i ++ ) {
150+ pdata -> ll_rd [i ].bar = bar ;
151+ pdata -> ll_rd [i ].off = off ;
152+ pdata -> ll_rd [i ].sz = ll_size ;
153+ off += ll_off_gap ;
154+ }
155+
156+ /* Write channel data region */
157+ for (i = 0 ; i < wr_ch ; i ++ ) {
158+ pdata -> dt_wr [i ].bar = bar ;
159+ pdata -> dt_wr [i ].off = off ;
160+ pdata -> dt_wr [i ].sz = dt_size ;
161+ off += dt_off_gap ;
162+ }
163+
164+ /* Read channel data region */
165+ for (i = 0 ; i < rd_ch ; i ++ ) {
166+ pdata -> dt_rd [i ].bar = bar ;
167+ pdata -> dt_rd [i ].off = off ;
168+ pdata -> dt_rd [i ].sz = dt_size ;
169+ off += dt_off_gap ;
170+ }
171+ }
172+
/* Map an eDMA interrupt index onto the Linux IRQ number of this PCI device. */
static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return pci_irq_vector(pdev, nr);
}
@@ -114,15 +194,15 @@ static const struct dw_edma_plat_ops dw_edma_pcie_plat_ops = {
114194 .pci_address = dw_edma_pcie_address ,
115195};
116196
117- static void dw_edma_pcie_get_vsec_dma_data (struct pci_dev * pdev ,
118- struct dw_edma_pcie_data * pdata )
197+ static void dw_edma_pcie_get_synopsys_dma_data (struct pci_dev * pdev ,
198+ struct dw_edma_pcie_data * pdata )
119199{
120200 u32 val , map ;
121201 u16 vsec ;
122202 u64 off ;
123203
124204 vsec = pci_find_vsec_capability (pdev , PCI_VENDOR_ID_SYNOPSYS ,
125- DW_PCIE_VSEC_DMA_ID );
205+ DW_PCIE_SYNOPSYS_VSEC_DMA_ID );
126206 if (!vsec )
127207 return ;
128208
@@ -131,23 +211,23 @@ static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
131211 PCI_VNDR_HEADER_LEN (val ) != 0x18 )
132212 return ;
133213
134- pci_dbg (pdev , "Detected PCIe Vendor-Specific Extended Capability DMA\n" );
214+ pci_dbg (pdev , "Detected Synopsys PCIe Vendor-Specific Extended Capability DMA\n" );
135215 pci_read_config_dword (pdev , vsec + 0x8 , & val );
136- map = FIELD_GET (DW_PCIE_VSEC_DMA_MAP , val );
216+ map = FIELD_GET (DW_PCIE_SYNOPSYS_VSEC_DMA_MAP , val );
137217 if (map != EDMA_MF_EDMA_LEGACY &&
138218 map != EDMA_MF_EDMA_UNROLL &&
139219 map != EDMA_MF_HDMA_COMPAT &&
140220 map != EDMA_MF_HDMA_NATIVE )
141221 return ;
142222
143223 pdata -> mf = map ;
144- pdata -> rg .bar = FIELD_GET (DW_PCIE_VSEC_DMA_BAR , val );
224+ pdata -> rg .bar = FIELD_GET (DW_PCIE_SYNOPSYS_VSEC_DMA_BAR , val );
145225
146226 pci_read_config_dword (pdev , vsec + 0xc , & val );
147227 pdata -> wr_ch_cnt = min_t (u16 , pdata -> wr_ch_cnt ,
148- FIELD_GET (DW_PCIE_VSEC_DMA_WR_CH , val ));
228+ FIELD_GET (DW_PCIE_SYNOPSYS_VSEC_DMA_WR_CH , val ));
149229 pdata -> rd_ch_cnt = min_t (u16 , pdata -> rd_ch_cnt ,
150- FIELD_GET (DW_PCIE_VSEC_DMA_RD_CH , val ));
230+ FIELD_GET (DW_PCIE_SYNOPSYS_VSEC_DMA_RD_CH , val ));
151231
152232 pci_read_config_dword (pdev , vsec + 0x14 , & val );
153233 off = val ;
@@ -157,6 +237,64 @@ static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
157237 pdata -> rg .off = off ;
158238}
159239
240+ static void dw_edma_pcie_get_xilinx_dma_data (struct pci_dev * pdev ,
241+ struct dw_edma_pcie_data * pdata )
242+ {
243+ u32 val , map ;
244+ u16 vsec ;
245+ u64 off ;
246+
247+ pdata -> devmem_phys_off = DW_PCIE_XILINX_MDB_INVALID_ADDR ;
248+
249+ vsec = pci_find_vsec_capability (pdev , PCI_VENDOR_ID_XILINX ,
250+ DW_PCIE_XILINX_MDB_VSEC_DMA_ID );
251+ if (!vsec )
252+ return ;
253+
254+ pci_read_config_dword (pdev , vsec + PCI_VNDR_HEADER , & val );
255+ if (PCI_VNDR_HEADER_REV (val ) != 0x00 ||
256+ PCI_VNDR_HEADER_LEN (val ) != 0x18 )
257+ return ;
258+
259+ pci_dbg (pdev , "Detected Xilinx PCIe Vendor-Specific Extended Capability DMA\n" );
260+ pci_read_config_dword (pdev , vsec + 0x8 , & val );
261+ map = FIELD_GET (DW_PCIE_XILINX_MDB_VSEC_DMA_MAP , val );
262+ if (map != EDMA_MF_HDMA_NATIVE )
263+ return ;
264+
265+ pdata -> mf = map ;
266+ pdata -> rg .bar = FIELD_GET (DW_PCIE_XILINX_MDB_VSEC_DMA_BAR , val );
267+
268+ pci_read_config_dword (pdev , vsec + 0xc , & val );
269+ pdata -> wr_ch_cnt = min (pdata -> wr_ch_cnt ,
270+ FIELD_GET (DW_PCIE_XILINX_MDB_VSEC_DMA_WR_CH , val ));
271+ pdata -> rd_ch_cnt = min (pdata -> rd_ch_cnt ,
272+ FIELD_GET (DW_PCIE_XILINX_MDB_VSEC_DMA_RD_CH , val ));
273+
274+ pci_read_config_dword (pdev , vsec + 0x14 , & val );
275+ off = val ;
276+ pci_read_config_dword (pdev , vsec + 0x10 , & val );
277+ off <<= 32 ;
278+ off |= val ;
279+ pdata -> rg .off = off ;
280+
281+ vsec = pci_find_vsec_capability (pdev , PCI_VENDOR_ID_XILINX ,
282+ DW_PCIE_XILINX_MDB_VSEC_ID );
283+ if (!vsec )
284+ return ;
285+
286+ pci_read_config_dword (pdev ,
287+ vsec + DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_HIGH ,
288+ & val );
289+ off = val ;
290+ pci_read_config_dword (pdev ,
291+ vsec + DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_LOW ,
292+ & val );
293+ off <<= 32 ;
294+ off |= val ;
295+ pdata -> devmem_phys_off = off ;
296+ }
297+
160298static int dw_edma_pcie_probe (struct pci_dev * pdev ,
161299 const struct pci_device_id * pid )
162300{
@@ -184,7 +322,29 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
184322 * Tries to find if exists a PCIe Vendor-Specific Extended Capability
185323 * for the DMA, if one exists, then reconfigures it.
186324 */
187- dw_edma_pcie_get_vsec_dma_data (pdev , vsec_data );
325+ dw_edma_pcie_get_synopsys_dma_data (pdev , vsec_data );
326+
327+ if (pdev -> vendor == PCI_VENDOR_ID_XILINX ) {
328+ dw_edma_pcie_get_xilinx_dma_data (pdev , vsec_data );
329+
330+ /*
331+ * There is no valid address found for the LL memory
332+ * space on the device side.
333+ */
334+ if (vsec_data -> devmem_phys_off == DW_PCIE_XILINX_MDB_INVALID_ADDR )
335+ return - ENOMEM ;
336+
337+ /*
338+ * Configure the channel LL and data blocks if number of
339+ * channels enabled in VSEC capability are more than the
340+ * channels configured in xilinx_mdb_data.
341+ */
342+ dw_edma_set_chan_region_offset (vsec_data , BAR_2 , 0 ,
343+ DW_PCIE_XILINX_MDB_LL_OFF_GAP ,
344+ DW_PCIE_XILINX_MDB_LL_SIZE ,
345+ DW_PCIE_XILINX_MDB_DT_OFF_GAP ,
346+ DW_PCIE_XILINX_MDB_DT_SIZE );
347+ }
188348
189349 /* Mapping PCI BAR regions */
190350 mask = BIT (vsec_data -> rg .bar );
@@ -367,6 +527,8 @@ static void dw_edma_pcie_remove(struct pci_dev *pdev)
367527
368528static const struct pci_device_id dw_edma_pcie_id_table [] = {
369529 { PCI_DEVICE_DATA (SYNOPSYS , EDDA , & snps_edda_data ) },
530+ { PCI_VDEVICE (XILINX , PCI_DEVICE_ID_XILINX_B054 ),
531+ (kernel_ulong_t )& xilinx_mdb_data },
370532 { }
371533};
372534MODULE_DEVICE_TABLE (pci , dw_edma_pcie_id_table );
0 commit comments