Add pylibcudf.gpumemoryview support for len()/nbytes #18133

Open
wants to merge 8 commits into branch-25.04
20 changes: 19 additions & 1 deletion python/pylibcudf/pylibcudf/gpumemoryview.pyx
@@ -1,4 +1,7 @@
# Copyright (c) 2023-2024, NVIDIA CORPORATION.
# Copyright (c) 2023-2025, NVIDIA CORPORATION.

import functools
import operator

__all__ = ["gpumemoryview"]

@@ -27,4 +30,19 @@ cdef class gpumemoryview:
    def __cuda_array_interface__(self):
        return self.obj.__cuda_array_interface__

    def __len__(self):
        return self.obj.__cuda_array_interface__["shape"][0]

    @property
    def nbytes(self):
        cai = self.obj.__cuda_array_interface__
        shape, typestr = cai["shape"], cai["typestr"]

        # Get the element size from the typestr: the first two characters
        # encode the byte order and the type code, and the remainder is the
        # element size in bytes, e.g. '<f4' for a 32-bit (4-byte) float.
        element_size = int(typestr[2:])

        return functools.reduce(operator.mul, shape) * element_size

    __hash__ = None
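
A minimal usage sketch (not part of the diff), assuming an `rmm.DeviceBuffer` backs the view, as in the tests below; the buffer size is illustrative:

```python
# Sketch only: a DeviceBuffer exposes a 1-D bytes view ('|u1') through
# __cuda_array_interface__, so len() and nbytes both report the byte count.
import numpy as np
import rmm
import pylibcudf as plc

host = np.zeros(1000, dtype="f4")                  # 4000 bytes on the host
buf = rmm.DeviceBuffer.to_device(host.tobytes())   # copy to the device

view = plc.gpumemoryview(buf)
assert len(view) == 4000       # shape[0] from __cuda_array_interface__
assert view.nbytes == 4000     # reduce(mul, shape) * element size (1 for '|u1')
```

Any object exposing `__cuda_array_interface__` works the same way; the new methods read only the interface's `shape` and `typestr`.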
58 changes: 58 additions & 0 deletions python/pylibcudf/pylibcudf/tests/test_gpumemoryview.py
@@ -0,0 +1,58 @@
# Copyright (c) 2025, NVIDIA CORPORATION.

import itertools

import numpy as np
import pytest

import rmm

import pylibcudf as plc

DTYPES = [
    "u1",
    "i2",
    "f4",
    "f8",
    "f16",
]
SIZES = [
    0,
    1,
    1000,
    1024,
    10000,
]


@pytest.fixture(params=tuple(itertools.product(SIZES, DTYPES)), ids=repr)
def np_array(request):
    size, dtype = request.param
    return np.empty((size,), dtype=dtype)


def test_cuda_array_interface(np_array):
    buf = rmm.DeviceBuffer(
        ptr=np_array.__array_interface__["data"][0], size=np_array.nbytes
    )
    gpumemview = plc.gpumemoryview(buf)

    np_array_view = np_array.view("u1")

    ai = np_array_view.__array_interface__
    cai = gpumemview.__cuda_array_interface__
    assert cai["shape"] == ai["shape"]
    assert cai["strides"] == ai["strides"]
    assert cai["typestr"] == ai["typestr"]


def test_len(np_array):
    buf = rmm.DeviceBuffer(
        ptr=np_array.__array_interface__["data"][0], size=np_array.nbytes
    )
    gpumemview = plc.gpumemoryview(buf)

    np_array_view = np_array.view("u1")

    assert len(gpumemview) == len(np_array_view)
    assert gpumemview.nbytes == np_array.nbytes