Skip to content
This repository was archived by the owner on Nov 17, 2023. It is now read-only.

Commit 683c974

Browse files
authored
[API] Standardize MXNet NumPy creation functions (#20572)
* standardize mxnet numpy creation functions * update * fix linspace * merge & add tests * add NumpyLinspaceParam * fix lint' ' * fix * add indexing test * fix tests * fix sanity * fix lint * fix tests * disable warning * fix * update * skip signature standardization * fix lint * update * rm test_contants * Add Code Signing Key * Revert "Add Code Signing Key" This reverts commit b09814c. * Replace context with device & update multiarray.py/_op.py * ctx => device * ctx/context => device * fix * fix multiarray * update ndarray.py * fix * fix * fix tests * update * fix tests * update rand_zipfian * update * device => cuda_device in util.py * context.gpu_memory_info => device.gpu_memory_info * fix docs * rm context in doc * fix lint * remove npv * Revert "remove npv" This reverts commit e775844.
1 parent 9266a91 commit 683c974

177 files changed

Lines changed: 3180 additions & 2489 deletions

File tree

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

benchmark/python/sparse/cast_storage.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ def measure_cost(repeat, f, *args, **kwargs):
4141

4242
def run_cast_storage_synthetic():
4343
def dense_to_sparse(m, n, density, ctx, repeat, stype):
44-
set_default_context(ctx)
44+
set_default_device(ctx)
4545
data_shape = (m, n)
4646
dns_data = rand_ndarray(data_shape, stype, density).tostype('default')
4747
dns_data.wait_to_read()

benchmark/python/sparse/dot.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626
import mxnet as mx
2727
import numpy as np
2828
import numpy.random as rnd
29-
from mxnet.test_utils import rand_ndarray, set_default_context, assert_almost_equal, get_bz2_data
29+
from mxnet.test_utils import rand_ndarray, set_default_device, assert_almost_equal, get_bz2_data
3030
from mxnet.base import check_call, _LIB
3131
from util import estimate_density
3232

@@ -267,7 +267,7 @@ def test_dot_synthetic(data_dict):
267267
# Benchmark MXNet and Scipy's dot operator
268268
def bench_dot(lhs_shape, rhs_shape, lhs_stype, rhs_stype,
269269
lhs_den, rhs_den, trans_lhs, ctx, num_repeat=10, fw="mxnet", distribution="uniform"):
270-
set_default_context(ctx)
270+
set_default_device(ctx)
271271
assert fw == "mxnet" or fw == "scipy"
272272
# Set funcs
273273
dot_func_sparse = mx.nd.sparse.dot if fw == "mxnet" else sp.spmatrix.dot

benchmark/python/sparse/sparse_op.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -155,7 +155,7 @@ def measure_cost_backward_baseline(repeat, dot, transpose, lhs, rhs):
155155
return diff / repeat
156156

157157
def bench_dot_forward(m, k, n, density, ctx, repeat):
158-
set_default_context(ctx)
158+
set_default_device(ctx)
159159
dns = mx.nd.random.uniform(shape=(k, n)).copyto(ctx)
160160
data_shape = (m, k)
161161
csr_data = rand_ndarray(data_shape, 'csr', density)
@@ -184,7 +184,7 @@ def bench_dot_forward(m, k, n, density, ctx, repeat):
184184
ratio_baseline, costs_baseline[0], costs_baseline[1]))
185185

186186
def bench_dot_backward(m, k, n, density, ctx, repeat):
187-
set_default_context(ctx)
187+
set_default_device(ctx)
188188
dns = mx.nd.random.uniform(shape=(m, n)).copyto(ctx)
189189
data_shape = (m, k)
190190
csr_data = rand_ndarray(data_shape, 'csr', density)

ci/docker/runtime_functions.sh

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -874,6 +874,8 @@ unittest_array_api_standardization() {
874874
export DMLC_LOG_STACK_TRACE_DEPTH=100
875875
python3 -m pytest --durations=50 --cov-report xml:tests_api.xml --verbose \
876876
array_api_tests/test_type_promotion.py::test_elementwise_function_two_arg_bool_type_promotion
877+
python3 -m pytest --durations=50 --cov-report xml:tests_api.xml --verbose array_api_tests/test_creation_functions.py
878+
python3 -m pytest --durations=50 --cov-report xml:tests_api.xml --verbose array_api_tests/test_indexing.py
877879
popd
878880
}
879881

docs/python_docs/python/api/context/index.rst renamed to docs/python_docs/python/api/device/index.rst

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,9 +15,9 @@
1515
specific language governing permissions and limitations
1616
under the License.
1717
18-
mxnet.context
18+
mxnet.device
1919
=============
2020

21-
.. automodule:: mxnet.context
21+
.. automodule:: mxnet.device
2222
:members:
2323
:autosummary:

docs/python_docs/python/api/index.rst

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -86,10 +86,10 @@ Gluon related modules
8686
Key value store interface of MXNet for parameter synchronization.
8787

8888
.. card::
89-
:title: mxnet.context
90-
:link: mxnet/context/index.html
89+
:title: mxnet.device
90+
:link: mxnet/device/index.html
9191

92-
CPU and GPU context information.
92+
CPU and GPU device information.
9393

9494
.. card::
9595
:title: mxnet.profiler
@@ -116,10 +116,10 @@ Advanced modules
116116
API for querying MXNet enabled features.
117117

118118
.. card::
119-
:title: mxnet.context
120-
:link: context/index.html
119+
:title: mxnet.device
120+
:link: device/index.html
121121

122-
MXNet array context for specifying in-memory storage device.
122+
MXNet array device for specifying in-memory storage device.
123123

124124
.. card::
125125
:title: mxnet.profiler

docs/python_docs/python/api/npx/index.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ Devices
5050
cpu_pinned
5151
gpu
5252
gpu_memory_info
53-
current_context
53+
current_device
5454
num_gpus
5555

5656
Neural networks

docs/python_docs/python/tutorials/deploy/inference/image_classification_jetson.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -71,18 +71,18 @@ And we are done. You can test the installation now by importing mxnet from pytho
7171

7272
## Running a pre-trained ResNet-50 model on Jetson
7373

74-
We are now ready to run a pre-trained model and run inference on a Jetson module. In this tutorial we are using ResNet-50 model trained on Imagenet dataset. We run the following classification script with either cpu/gpu context using python3.
74+
We are now ready to run a pre-trained model and run inference on a Jetson module. In this tutorial we are using ResNet-50 model trained on Imagenet dataset. We run the following classification script with either cpu/gpu device using python3.
7575

7676
```{.python .input}
7777
from mxnet import gluon
7878
import mxnet as mx
7979
80-
# set context
80+
# set device
8181
gpus = mx.test_utils.list_gpus()
82-
ctx = mx.gpu() if gpus else mx.cpu()
82+
device = mx.gpu() if gpus else mx.cpu()
8383
8484
# load pre-trained model
85-
net = gluon.model_zoo.vision.resnet50_v1(pretrained=True, ctx=ctx)
85+
net = gluon.model_zoo.vision.resnet50_v1(pretrained=True, device=device)
8686
net.hybridize(static_alloc=True, static_shape=True)
8787
8888
# load labels
@@ -99,7 +99,7 @@ img = mx.image.color_normalize(img.astype(dtype='float32')/255,
9999
std=mx.np.array([0.229, 0.224, 0.225])) # normalize
100100
img = img.transpose((2, 0, 1)) # channel first
101101
img = mx.np.expand_dims(img, axis=0) # batchify
102-
img = img.as_in_ctx(ctx)
102+
img = img.to_device(device)
103103
104104
prob = mx.npx.softmax(net(img)) # predict and normalize output
105105
idx = mx.npx.topk(prob, k=5)[0] # get top 5 result

docs/python_docs/python/tutorials/extend/customop.md

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -101,7 +101,7 @@ class SigmoidProp(mx.operator.CustomOpProp):
101101
# return 3 lists representing inputs shapes, outputs shapes, and aux data shapes.
102102
return (data_shape,), (output_shape,), ()
103103
104-
def create_operator(self, ctx, in_shapes, in_dtypes):
104+
def create_operator(self, device, in_shapes, in_dtypes):
105105
# create and return the CustomOp class.
106106
return Sigmoid()
107107
```
@@ -183,7 +183,7 @@ class DenseProp(mx.operator.CustomOpProp):
183183
# return 3 lists representing inputs shapes, outputs shapes, and aux data shapes.
184184
return (data_shape, weight_shape), (output_shape,), ()
185185
186-
def create_operator(self, ctx, in_shapes, in_dtypes):
186+
def create_operator(self, device, in_shapes, in_dtypes):
187187
# create and return the CustomOp class.
188188
return Dense(self._bias)
189189
```
@@ -201,8 +201,8 @@ class DenseBlock(mx.gluon.Block):
201201
self.weight = gluon.Parameter('weight', shape=(channels, in_channels))
202202
203203
def forward(self, x):
204-
ctx = x.context
205-
return mx.nd.Custom(x, self.weight.data(ctx), bias=self._bias, op_type='dense')
204+
device = x.device
205+
return mx.nd.Custom(x, self.weight.data(device), bias=self._bias, op_type='dense')
206206
```
207207

208208
### Example usage

docs/python_docs/python/tutorials/getting-started/crash-course/1-nparray.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -195,7 +195,7 @@ a = np.array(a)
195195
(type(a), a)
196196
```
197197

198-
Additionally, you can move them to different GPU contexts. You will dive more
198+
Additionally, you can move them to different GPU devices. You will dive more
199199
into this later, but here is an example for now.
200200

201201
```{.python .input}

0 commit comments

Comments
 (0)