The following script reproduces the issue; in it, `use_fp8=True` is set when constructing the `LowLevelZeroPlugin`:
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader

import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import LowLevelZeroPlugin


class RandomDataset(Dataset):
    """Synthetic classification data: 100 batches of 32 samples."""

    def __init__(self, num_samples=32 * 100, input_dim=1024, num_classes=10):
        self.x = torch.randn(num_samples, input_dim)
        self.y = torch.randint(0, num_classes, (num_samples,))

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]


class MLP(nn.Module):
    def __init__(self, input_dim=1024, hidden_dim=512, num_layers=10, num_classes=10):
        super().__init__()
        layers = []
        for i in range(num_layers):
            in_dim = input_dim if i == 0 else hidden_dim
            layers.append(nn.Linear(in_dim, hidden_dim))
            layers.append(nn.ReLU())
        layers.append(nn.Linear(hidden_dim, num_classes))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)


def main():
    seed = 1024
    colossalai.launch_from_torch(seed=seed)

    plugin = LowLevelZeroPlugin(use_fp8=True)
    booster = Booster(plugin=plugin)

    model = MLP()
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    criterion = nn.CrossEntropyLoss()
    dataset = RandomDataset()
    train_dataloader = DataLoader(dataset, batch_size=32, shuffle=False)

    model, optimizer, criterion, train_dataloader, _ = booster.boost(
        model, optimizer, criterion, train_dataloader
    )

    # Cast inputs to the plugin's mixed-precision dtype (fp16 by default).
    precision = getattr(plugin, "precision", "fp16")
    dtype_map = {"fp16": torch.float16, "bf16": torch.bfloat16, "fp32": torch.float32}
    dtype = dtype_map.get(precision, torch.float16)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model.train()
    for epoch in range(1):
        total_loss = 0
        for step, (x, y) in enumerate(train_dataloader):
            x = x.to(device=device, dtype=dtype)
            y = y.to(device=device)
            optimizer.zero_grad()
            output = model(x)  # <-- crashes here when use_fp8=True
            loss = criterion(output, y)
            booster.backward(loss, optimizer)
            optimizer.step()
            total_loss += loss.item()
            print(f"[Epoch {epoch}] step {step}, loss = {loss.item():.4f}")
        avg_loss = total_loss / len(train_dataloader)
        print(f"Epoch {epoch} finished, average loss = {avg_loss:.4f}")


if __name__ == "__main__":
    main()
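The script is launched with torchrun across four processes (the log below reports world size 4, and the traceback shows the file saved as bug2.py); the exact command I used was along these lines:

```
torchrun --nproc_per_node 4 bug2.py
```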
W1101 19:36:20.780868 936565 site-packages/torch/distributed/run.py:793]
W1101 19:36:20.780868 936565 site-packages/torch/distributed/run.py:793] *****************************************
W1101 19:36:20.780868 936565 site-packages/torch/distributed/run.py:793] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
W1101 19:36:20.780868 936565 site-packages/torch/distributed/run.py:793] *****************************************
[11/01/25 19:36:24] INFO colossalai - colossalai - INFO: /home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/colossalai/initialize.py:75 launch
                    INFO colossalai - colossalai - INFO: Distributed environment is initialized, world size: 4
[rank1]:E1101 19:36:25.141706 937254 site-packages/torch/_subclasses/fake_tensor.py:2017] [0/0] failed while attempting to run meta for aten.view.default
[rank1]:E1101 19:36:25.141706 937254 site-packages/torch/_subclasses/fake_tensor.py:2017] [0/0] Traceback (most recent call last):
[rank1]:E1101 19:36:25.141706 937254 site-packages/torch/_subclasses/fake_tensor.py:2017] [0/0] File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_subclasses/fake_tensor.py", line 2013, in _dispatch_impl
[rank1]:E1101 19:36:25.141706 937254 site-packages/torch/_subclasses/fake_tensor.py:2017] [0/0] r = func(*args, **kwargs)
[rank1]:E1101 19:36:25.141706 937254 site-packages/torch/_subclasses/fake_tensor.py:2017] [0/0] File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_ops.py", line 716, in __call__
[rank1]:E1101 19:36:25.141706 937254 site-packages/torch/_subclasses/fake_tensor.py:2017] [0/0] return self._op(*args, **kwargs)
[rank1]:E1101 19:36:25.141706 937254 site-packages/torch/_subclasses/fake_tensor.py:2017] [0/0] File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_refs/__init__.py", line 4591, in view
[rank1]:E1101 19:36:25.141706 937254 site-packages/torch/_subclasses/fake_tensor.py:2017] [0/0] return _reshape_view_helper(a, *shape, allow_copy=False)
[rank1]:E1101 19:36:25.141706 937254 site-packages/torch/_subclasses/fake_tensor.py:2017] [0/0] File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_refs/__init__.py", line 3659, in _reshape_view_helper
[rank1]:E1101 19:36:25.141706 937254 site-packages/torch/_subclasses/fake_tensor.py:2017] [0/0] shape = utils.infer_size(shape, a.numel())
[rank1]:E1101 19:36:25.141706 937254 site-packages/torch/_subclasses/fake_tensor.py:2017] [0/0] File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_prims_common/__init__.py", line 901, in infer_size
[rank1]:E1101 19:36:25.141706 937254 site-packages/torch/_subclasses/fake_tensor.py:2017] [0/0] torch._check(
[rank1]:E1101 19:36:25.141706 937254 site-packages/torch/_subclasses/fake_tensor.py:2017] [0/0] File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/__init__.py", line 1564, in _check
[rank1]:E1101 19:36:25.141706 937254 site-packages/torch/_subclasses/fake_tensor.py:2017] [0/0] _check_with(RuntimeError, cond, message)
[rank1]:E1101 19:36:25.141706 937254 site-packages/torch/_subclasses/fake_tensor.py:2017] [0/0] File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/__init__.py", line 1546, in _check_with
[rank1]:E1101 19:36:25.141706 937254 site-packages/torch/_subclasses/fake_tensor.py:2017] [0/0] raise error_type(message_evaluated)
[rank1]:E1101 19:36:25.141706 937254 site-packages/torch/_subclasses/fake_tensor.py:2017] [0/0] RuntimeError: shape '[32, 512]' is invalid for input of size 512
[rank1]: Traceback (most recent call last):
[rank1]: File "/home/yanzhen/distributed_test/colossalAI/test/bug2.py", line 88, in <module>
[rank1]: main()
[rank1]: File "/home/yanzhen/distributed_test/colossalAI/test/bug2.py", line 74, in main
[rank1]: output = model(x)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
[rank1]: return self._call_impl(*args, **kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
[rank1]: return forward_call(*args, **kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/colossalai/booster/plugin/low_level_zero_plugin.py", line 107, in forward
[rank1]: return super().forward(*args, **kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/colossalai/interface/model.py", line 127, in forward
[rank1]: return self.module(*args, **kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
[rank1]: return self._call_impl(*args, **kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
[rank1]: return forward_call(*args, **kwargs)
[rank1]: File "/home/yanzhen/distributed_test/colossalAI/test/bug2.py", line 38, in forward
[rank1]: return self.net(x)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
[rank1]: return self._call_impl(*args, **kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
[rank1]: return forward_call(*args, **kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/nn/modules/container.py", line 250, in forward
[rank1]: input = module(input)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
[rank1]: return self._call_impl(*args, **kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
[rank1]: return forward_call(*args, **kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/nn/modules/linear.py", line 125, in forward
[rank1]: return F.linear(input, self.weight, self.bias)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/colossalai/tensor/colo_parameter.py", line 66, in __torch_function__
[rank1]: ret = super().__torch_function__(func, types, args, kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/colossalai/tensor/colo_tensor.py", line 91, in __torch_function__
[rank1]: ret = func(*args, **kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/colossalai/quantization/fp8.py", line 845, in linear_fp8
[rank1]: out = _linear_fp8(input, weight, bias)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/eval_frame.py", line 465, in _fn
[rank1]: return fn(*args, **kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/convert_frame.py", line 1269, in __call__
[rank1]: return self._torchdynamo_orig_callable(
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/convert_frame.py", line 1064, in __call__
[rank1]: result = self._inner_convert(
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/convert_frame.py", line 526, in __call__
[rank1]: return _compile(
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/convert_frame.py", line 924, in _compile
[rank1]: guarded_code = compile_inner(code, one_graph, hooks, transform)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/convert_frame.py", line 666, in compile_inner
[rank1]: return _compile_inner(code, one_graph, hooks, transform)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_utils_internal.py", line 87, in wrapper_function
[rank1]: return function(*args, **kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/convert_frame.py", line 699, in _compile_inner
[rank1]: out_code = transform_code_object(code, transform)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/bytecode_transformation.py", line 1322, in transform_code_object
[rank1]: transformations(instructions, code_options)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/convert_frame.py", line 219, in _fn
[rank1]: return fn(*args, **kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/convert_frame.py", line 634, in transform
[rank1]: tracer.run()
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/symbolic_convert.py", line 2796, in run
[rank1]: super().run()
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/symbolic_convert.py", line 983, in run
[rank1]: while self.step():
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/symbolic_convert.py", line 895, in step
[rank1]: self.dispatch_table[inst.opcode](self, inst)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/symbolic_convert.py", line 582, in wrapper
[rank1]: return inner_fn(self, inst)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/symbolic_convert.py", line 1602, in CALL_FUNCTION
[rank1]: self.call_function(fn, args, {})
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/symbolic_convert.py", line 830, in call_function
[rank1]: self.push(fn.call_function(self, args, kwargs)) # type: ignore[arg-type]
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/variables/misc.py", line 1024, in call_function
[rank1]: return self.obj.call_method(tx, self.name, args, kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/variables/misc.py", line 774, in call_method
[rank1]: return self.call_apply(tx, args, kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/variables/misc.py", line 694, in call_apply
[rank1]: val = AutogradFunctionApplyVariable(
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/variables/higher_order_ops.py", line 2015, in call_function
[rank1]: (fwd_out, _), fwd_graph, fwd_freevars = speculate_subgraph(
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/variables/higher_order_ops.py", line 462, in speculate_subgraph
[rank1]: output = f.call_function(tx, args, sub_kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/variables/functions.py", line 324, in call_function
[rank1]: return super().call_function(tx, args, kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/variables/functions.py", line 111, in call_function
[rank1]: return tx.inline_user_function_return(self, [*self.self_args(), *args], kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/symbolic_convert.py", line 836, in inline_user_function_return
[rank1]: return InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/symbolic_convert.py", line 3011, in inline_call
[rank1]: return cls.inline_call_(parent, func, args, kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/symbolic_convert.py", line 3139, in inline_call_
[rank1]: tracer.run()
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/symbolic_convert.py", line 983, in run
[rank1]: while self.step():
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/symbolic_convert.py", line 895, in step
[rank1]: self.dispatch_table[inst.opcode](self, inst)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/symbolic_convert.py", line 582, in wrapper
[rank1]: return inner_fn(self, inst)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/symbolic_convert.py", line 1680, in CALL_FUNCTION_EX
[rank1]: self.call_function(fn, argsvars.items, kwargsvars)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/symbolic_convert.py", line 830, in call_function
[rank1]: self.push(fn.call_function(self, args, kwargs)) # type: ignore[arg-type]
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/variables/misc.py", line 1024, in call_function
[rank1]: return self.obj.call_method(tx, self.name, args, kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/variables/tensor.py", line 535, in call_method
[rank1]: return wrap_fx_proxy(
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/variables/builder.py", line 2037, in wrap_fx_proxy
[rank1]: return wrap_fx_proxy_cls(target_cls=TensorVariable, **kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/variables/builder.py", line 2124, in wrap_fx_proxy_cls
[rank1]: example_value = get_fake_value(proxy.node, tx, allow_non_graph_fake=True)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/utils.py", line 2082, in get_fake_value
[rank1]: raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/utils.py", line 2017, in get_fake_value
[rank1]: ret_val = wrap_fake_exception(
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/utils.py", line 1574, in wrap_fake_exception
[rank1]: return fn()
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/utils.py", line 2018, in <lambda>
[rank1]: lambda: run_node(tx.output, node, args, kwargs, nnmodule)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/utils.py", line 2150, in run_node
[rank1]: raise RuntimeError(make_error_message(e)).with_traceback(
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_dynamo/utils.py", line 2134, in run_node
[rank1]: return getattr(args[0], node.target)(*args[1:], **kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/utils/_stats.py", line 21, in wrapper
[rank1]: return fn(*args, **kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_subclasses/fake_tensor.py", line 1238, in __torch_dispatch__
[rank1]: return self.dispatch(func, types, args, kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_subclasses/fake_tensor.py", line 1692, in dispatch
[rank1]: return self._cached_dispatch_impl(func, types, args, kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_subclasses/fake_tensor.py", line 1339, in _cached_dispatch_impl
[rank1]: output = self._dispatch_impl(func, types, args, kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_subclasses/fake_tensor.py", line 2013, in _dispatch_impl
[rank1]: r = func(*args, **kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_ops.py", line 716, in __call__
[rank1]: return self._op(*args, **kwargs)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_refs/__init__.py", line 4591, in view
[rank1]: return _reshape_view_helper(a, *shape, allow_copy=False)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_refs/__init__.py", line 3659, in _reshape_view_helper
[rank1]: shape = utils.infer_size(shape, a.numel())
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/_prims_common/__init__.py", line 901, in infer_size
[rank1]: torch._check(
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/__init__.py", line 1564, in _check
[rank1]: _check_with(RuntimeError, cond, message)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/torch/__init__.py", line 1546, in _check_with
[rank1]: raise error_type(message_evaluated)
[rank1]: torch._dynamo.exc.TorchRuntimeError: Failed running call_method reshape(*(FakeTensor(..., device='cuda:1', size=(512,), dtype=torch.float16,
[rank1]: grad_fn=<SelectBackward0>), 32, 512), **{}):
[rank1]: shape '[32, 512]' is invalid for input of size 512
[rank1]: from user code:
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/colossalai/quantization/fp8.py", line 839, in _linear_fp8
[rank1]: return _LinearFp8.apply(input, weight, bias)
[rank1]: File "/home/yanzhen/miniconda3/envs/colossal/lib/python3.9/site-packages/colossalai/quantization/fp8.py", line 809, in forward
[rank1]: return out.reshape(*ctx.x_shape[:-1], w.shape[0])
[rank1]: Set TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information
[rank1]: You can suppress this exception and fall back to eager by setting:
[rank1]: import torch._dynamo
[rank1]: torch._dynamo.config.suppress_errors = True
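To isolate the failing op: the view at the bottom of the trace is asked to produce shape [32, 512] from a tensor that only holds 512 elements, which can never succeed. A standalone sketch of just that failure, with the shapes copied from the log above (plain PyTorch, no ColossalAI involved):

```python
import torch

# The FakeTensor in the log has size (512,); the fp8 linear then tries to view
# it back to the saved input shape (32, 512), which has 32x as many elements.
out = torch.randn(512, dtype=torch.float16)
try:
    out.reshape(32, 512)
except RuntimeError as e:
    print(e)  # shape '[32, 512]' is invalid for input of size 512
```

As the log itself suggests, setting `torch._dynamo.config.suppress_errors = True` makes dynamo fall back to eager mode, but that only hides the failed compilation rather than fixing the fp8 path.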
Is there an existing issue for this bug?
The bug has not been fixed in the latest main branch
Do you feel comfortable sharing a concise (minimal) script that reproduces the error? :)
Yes, I will share a minimal reproducible script.
🐛 Describe the bug
It seems that the fp8 feature in `LowLevelZeroPlugin` is not working properly: it raises the error `shape '[32, 512]' is invalid for input of size 512`. The script at the top of this report reproduces the issue; in it, `use_fp8=True` is set when using `LowLevelZeroPlugin`. Running that script with torchrun produces the error log shown above (only a portion is included due to its length).
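My guess at the root cause, inferred only from the two user-code frames at the end of the traceback and not verified against the actual source: `_LinearFp8.forward` saves the input's shape and restores it on the output via `out.reshape(*ctx.x_shape[:-1], w.shape[0])` (fp8.py line 809 in the log). During dynamo's fake-tensor tracing the output apparently arrives with only 512 elements (note the FakeTensor of size `(512,)` with `grad_fn=<SelectBackward0>`), while `ctx.x_shape[:-1]` is still `(32,)`, so the reshape to `[32, 512]` cannot succeed. A hypothetical simplification of that pattern, just to make the failure mode concrete (this is my sketch, not the real fp8.py):

```python
import torch


class _LinearFp8Sketch(torch.autograd.Function):
    """Hypothetical stand-in for colossalai.quantization.fp8._LinearFp8,
    reconstructed only from the traceback frames above."""

    @staticmethod
    def forward(ctx, x, w, bias):
        ctx.x_shape = x.shape                # (32, 512) in this repro
        x2d = x.reshape(-1, x.shape[-1])     # flatten leading batch dims
        out = x2d @ w.t()                    # the real code does this in fp8
        if bias is not None:
            out = out + bias
        # fp8.py line 809 in the log: if `out` is traced with the wrong number
        # of elements, this reshape raises the reported RuntimeError.
        return out.reshape(*ctx.x_shape[:-1], w.shape[0])

    @staticmethod
    def backward(ctx, grad_output):
        raise NotImplementedError  # irrelevant to the forward-time crash
```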
Environment