[LE-3812] RDMA/mana_ib: use the correct page table index based on hardware page size #511

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
10 changes: 5 additions & 5 deletions drivers/infiniband/hw/mana/main.c
@@ -358,8 +358,8 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
 				sizeof(struct gdma_create_dma_region_resp));

 	create_req->length = umem->length;
-	create_req->offset_in_page = umem->address & (page_sz - 1);
-	create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT;
+	create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
+	create_req->gdma_page_type = order_base_2(page_sz) - MANA_PAGE_SHIFT;
 	create_req->page_count = num_pages_total;

 	ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n",
@@ -460,13 +460,13 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
 		PAGE_SHIFT;
 	prot = pgprot_writecombine(vma->vm_page_prot);

-	ret = rdma_user_mmap_io(ibcontext, vma, pfn, gc->db_page_size, prot,
+	ret = rdma_user_mmap_io(ibcontext, vma, pfn, PAGE_SIZE, prot,
 				NULL);
 	if (ret)
 		ibdev_dbg(ibdev, "can't rdma_user_mmap_io ret %d\n", ret);
 	else
-		ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %u, ret %d\n",
-			  pfn, gc->db_page_size, ret);
+		ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %lu, ret %d\n",
+			  pfn, PAGE_SIZE, ret);

 	return ret;
 }
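Note on the two hunks above: the GDMA page type indexes the hardware's page-size table, whose unit is the 4 KB MANA page, so the exponent must be rebased with MANA_PAGE_SHIFT rather than the CPU's PAGE_SHIFT; and rdma_user_mmap_io() maps whole CPU pages, so the doorbell mapping length has to be PAGE_SIZE even though the hardware doorbell page itself is gc->db_page_size. A minimal sketch of the page-type arithmetic (the helper name is hypothetical, not in the patch):

#include <linux/log2.h>

/* Rebase the page-size exponent to the 4 KB MANA page:
 *   order_base_2(SZ_4K)  = 12  ->  type 0
 *   order_base_2(SZ_64K) = 16  ->  type 4
 * Subtracting PAGE_SHIFT instead (16 on a 64 KB-page ARM64 kernel)
 * would have mislabeled a 64 KB hardware page size as type 0. */
static unsigned int mana_gdma_page_type(unsigned long page_sz)
{
	return order_base_2(page_sz) - MANA_PAGE_SHIFT;
}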
3 changes: 2 additions & 1 deletion drivers/net/ethernet/microsoft/Kconfig
@@ -17,7 +17,8 @@ if NET_VENDOR_MICROSOFT

 config MICROSOFT_MANA
 	tristate "Microsoft Azure Network Adapter (MANA) support"
-	depends on PCI_MSI && X86_64
+	depends on PCI_MSI
+	depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN)
 	depends on PCI_HYPERV
 	select AUXILIARY_BUS
 	help
10 changes: 5 additions & 5 deletions drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -182,7 +182,7 @@ int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
 	dma_addr_t dma_handle;
 	void *buf;

-	if (length < PAGE_SIZE || !is_power_of_2(length))
+	if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
 		return -EINVAL;

 	gmi->dev = gc->dev;
@@ -717,7 +717,7 @@ EXPORT_SYMBOL(mana_gd_destroy_dma_region);
 static int mana_gd_create_dma_region(struct gdma_dev *gd,
 				     struct gdma_mem_info *gmi)
 {
-	unsigned int num_page = gmi->length / PAGE_SIZE;
+	unsigned int num_page = gmi->length / MANA_PAGE_SIZE;
 	struct gdma_create_dma_region_req *req = NULL;
 	struct gdma_create_dma_region_resp resp = {};
 	struct gdma_context *gc = gd->gdma_context;
@@ -727,10 +727,10 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
 	int err;
 	int i;

-	if (length < PAGE_SIZE || !is_power_of_2(length))
+	if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
 		return -EINVAL;

-	if (offset_in_page(gmi->virt_addr) != 0)
+	if (!MANA_PAGE_ALIGNED(gmi->virt_addr))
 		return -EINVAL;

 	hwc = gc->hwc.driver_data;
@@ -751,7 +751,7 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
 	req->page_addr_list_len = num_page;

 	for (i = 0; i < num_page; i++)
-		req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE;
+		req->page_addr_list[i] = gmi->dma_handle + i * MANA_PAGE_SIZE;

 	err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
 	if (err)
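Note: the DMA region request now describes buffers in 4 KB MANA pages regardless of the kernel's page size. A worked sketch of what the loop above emits, for a hypothetical physically contiguous 64 KB buffer at dma_handle 0x80000000:

/* num_page = 65536 / MANA_PAGE_SIZE = 16 entries, one per 4 KB frame:
 *   page_addr_list[0]  = 0x80000000
 *   page_addr_list[1]  = 0x80001000
 *   ...
 *   page_addr_list[15] = 0x8000f000
 * With PAGE_SIZE on a 64 KB-page kernel this would have collapsed to a
 * single 64 KB entry, which is not the unit the device expects. */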
14 changes: 7 additions & 7 deletions drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -361,12 +361,12 @@ static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
 	int err;

 	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
-	if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
-		eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+	if (eq_size < MANA_MIN_QSIZE)
+		eq_size = MANA_MIN_QSIZE;

 	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
-	if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
-		cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+	if (cq_size < MANA_MIN_QSIZE)
+		cq_size = MANA_MIN_QSIZE;

 	hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
 	if (!hwc_cq)
@@ -428,7 +428,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,

 	dma_buf->num_reqs = q_depth;

-	buf_size = PAGE_ALIGN(q_depth * max_msg_size);
+	buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size);

 	gmi = &dma_buf->mem_info;
 	err = mana_gd_alloc_memory(gc, buf_size, gmi);
@@ -496,8 +496,8 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
 	else
 		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);

-	if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
-		queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+	if (queue_size < MANA_MIN_QSIZE)
+		queue_size = MANA_MIN_QSIZE;

 	hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
 	if (!hwc_wq)
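Note: MANA_MIN_QSIZE pins the minimum queue allocation to one 4 KB MANA page instead of one CPU page. Worked numbers, using GDMA_EQE_SIZE = 16 and GDMA_CQE_SIZE = 64 from gdma.h below and an assumed q_depth of 128:

/* eq_size = roundup_pow_of_two(16 * 128) = 2048 -> clamped up to 4096
 * cq_size = roundup_pow_of_two(64 * 128) = 8192 -> no clamping needed
 * The old floor, MINIMUM_SUPPORTED_PAGE_SIZE == PAGE_SIZE, would have
 * forced 65536 bytes per queue on a 64 KB-page kernel. */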
8 changes: 4 additions & 4 deletions drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1867,10 +1867,10 @@ static int mana_create_txq(struct mana_port_context *apc,
 	 * to prevent overflow.
 	 */
 	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
-	BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
+	BUILD_BUG_ON(!MANA_PAGE_ALIGNED(txq_size));

 	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
-	cq_size = PAGE_ALIGN(cq_size);
+	cq_size = MANA_PAGE_ALIGN(cq_size);

 	gc = gd->gdma_context;

@@ -2128,8 +2128,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 	if (err)
 		goto out;

-	rq_size = PAGE_ALIGN(rq_size);
-	cq_size = PAGE_ALIGN(cq_size);
+	rq_size = MANA_PAGE_ALIGN(rq_size);
+	cq_size = MANA_PAGE_ALIGN(cq_size);

 	/* Create RQ */
 	memset(&spec, 0, sizeof(spec));
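Note: the compile-time check now asserts alignment to the hardware page rather than the CPU page. Worked numbers with MAX_SEND_BUFFERS_PER_QUEUE = 256 (mana.h below):

/* txq_size = 256 * 32 = 8192 = 2 * MANA_PAGE_SIZE -> check passes.
 * On a 64 KB-page kernel PAGE_ALIGNED(8192) is false, so the previous
 * BUILD_BUG_ON(!PAGE_ALIGNED(txq_size)) would have broken the build
 * even though the hardware only requires 4 KB alignment. */
BUILD_BUG_ON(!MANA_PAGE_ALIGNED(MAX_SEND_BUFFERS_PER_QUEUE * 32));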
13 changes: 7 additions & 6 deletions drivers/net/ethernet/microsoft/mana/shm_channel.c
@@ -6,6 +6,7 @@
 #include <linux/io.h>
 #include <linux/mm.h>

+#include <net/mana/gdma.h>
 #include <net/mana/shm_channel.h>

 #define PAGE_FRAME_L48_WIDTH_BYTES 6
@@ -155,8 +156,8 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
 		return err;
 	}

-	if (!PAGE_ALIGNED(eq_addr) || !PAGE_ALIGNED(cq_addr) ||
-	    !PAGE_ALIGNED(rq_addr) || !PAGE_ALIGNED(sq_addr))
+	if (!MANA_PAGE_ALIGNED(eq_addr) || !MANA_PAGE_ALIGNED(cq_addr) ||
+	    !MANA_PAGE_ALIGNED(rq_addr) || !MANA_PAGE_ALIGNED(sq_addr))
 		return -EINVAL;

 	if ((eq_msix_index & VECTOR_MASK) != eq_msix_index)
@@ -183,31 +184,31 @@

 	/* EQ addr: low 48 bits of frame address */
 	shmem = (u64 *)ptr;
-	frame_addr = PHYS_PFN(eq_addr);
+	frame_addr = MANA_PFN(eq_addr);
 	*shmem = frame_addr & PAGE_FRAME_L48_MASK;
 	all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
 		(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
 	ptr += PAGE_FRAME_L48_WIDTH_BYTES;

 	/* CQ addr: low 48 bits of frame address */
 	shmem = (u64 *)ptr;
-	frame_addr = PHYS_PFN(cq_addr);
+	frame_addr = MANA_PFN(cq_addr);
 	*shmem = frame_addr & PAGE_FRAME_L48_MASK;
 	all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
 		(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
 	ptr += PAGE_FRAME_L48_WIDTH_BYTES;

 	/* RQ addr: low 48 bits of frame address */
 	shmem = (u64 *)ptr;
-	frame_addr = PHYS_PFN(rq_addr);
+	frame_addr = MANA_PFN(rq_addr);
 	*shmem = frame_addr & PAGE_FRAME_L48_MASK;
 	all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
 		(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
 	ptr += PAGE_FRAME_L48_WIDTH_BYTES;

 	/* SQ addr: low 48 bits of frame address */
 	shmem = (u64 *)ptr;
-	frame_addr = PHYS_PFN(sq_addr);
+	frame_addr = MANA_PFN(sq_addr);
 	*shmem = frame_addr & PAGE_FRAME_L48_MASK;
 	all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
 		(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
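Note: the shared-memory handshake sends each queue address as a 4 KB frame number, low 48 bits in a 6-byte slot (PAGE_FRAME_L48_WIDTH_BYTES) and the top 4 bits packed per queue into all_addr_h4bits. With PHYS_PFN on a 64 KB-page kernel the values would be 64 KB frame numbers, off by a factor of 16 from what the device decodes. A standalone sketch of the encoding, assuming PAGE_FRAME_L48_WIDTH_BITS is 48 and PAGE_FRAME_H4_WIDTH_BITS is 4 (consistent with the macro names and the 6-byte slot; the helper is hypothetical):

#include <stdint.h>

#define MANA_PAGE_SHIFT		12
#define PAGE_FRAME_L48_MASK	((1ULL << 48) - 1)

/* Pack one queue address; seq is the queue's position (EQ=0, CQ=1, ...). */
static void pack_queue_addr(uint64_t addr, unsigned int seq,
			    uint64_t *low48, uint64_t *h4bits_acc)
{
	uint64_t frame = addr >> MANA_PAGE_SHIFT;	/* MANA_PFN(addr) */

	*low48 = frame & PAGE_FRAME_L48_MASK;		/* 6-byte slot */
	*h4bits_acc |= (frame >> 48) << (seq * 4);	/* top nibble */
}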
10 changes: 9 additions & 1 deletion include/net/mana/gdma.h
@@ -224,7 +224,15 @@ struct gdma_dev {
 	struct auxiliary_device *adev;
 };

-#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE
+/* MANA_PAGE_SIZE is the DMA unit */
+#define MANA_PAGE_SHIFT 12
+#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
+#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
+#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
+#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)
+
+/* Required by HW */
+#define MANA_MIN_QSIZE MANA_PAGE_SIZE

 #define GDMA_CQE_SIZE 64
 #define GDMA_EQE_SIZE 16
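Note: these macros fix the driver's DMA unit at 4 KB no matter what PAGE_SIZE the kernel was built with (4 KB, 16 KB, or 64 KB on ARM64). A host-side restatement of the same arithmetic, purely to make it concrete:

#include <assert.h>

#define MANA_PAGE_SHIFT	12
#define MANA_PAGE_SIZE	(1UL << MANA_PAGE_SHIFT)
#define MANA_PAGE_ALIGN(x) (((x) + MANA_PAGE_SIZE - 1) & ~(MANA_PAGE_SIZE - 1))
#define MANA_PFN(a)	((a) >> MANA_PAGE_SHIFT)

int main(void)
{
	assert(MANA_PAGE_ALIGN(8193UL) == 12288UL);  /* rounds up to 12 KB */
	assert(MANA_PFN(0x12340000UL) == 0x12340UL); /* 4 KB frame number */
	/* PHYS_PFN(0x12340000) on a 64 KB-page kernel would give 0x1234. */
	return 0;
}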
3 changes: 2 additions & 1 deletion include/net/mana/mana.h
@@ -40,7 +40,8 @@ enum TRI_STATE {

 #define MAX_SEND_BUFFERS_PER_QUEUE 256

-#define EQ_SIZE (8 * PAGE_SIZE)
+#define EQ_SIZE (8 * MANA_PAGE_SIZE)
+
 #define LOG2_EQ_THROTTLE 3

 #define MAX_PORTS_IN_MANA_DEV 256
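Note: EQ_SIZE is now a fixed 8 * 4096 = 32 KB. Under the old definition a 64 KB-page kernel would have sized each EQ at 8 * 65536 = 512 KB, sixteen times the intended allocation.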