Skip to content

Commit

Permalink
WIP: Need to fix tests to work in C++ mode
Browse files Browse the repository at this point in the history
  • Loading branch information
dagardner-nv committed Dec 21, 2023
1 parent 64b77c3 commit 1f5ae81
Show file tree
Hide file tree
Showing 3 changed files with 2 additions and 17 deletions.
4 changes: 2 additions & 2 deletions tests/examples/log_parsing/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,8 @@
import pytest


@pytest.fixture
def config(config): # pylint: disable=redefined-outer-name
@pytest.fixture(name="config")
def config_fixture(config):
"""
The log_parsing pipeline requires NLP mode. Set this here so all the tests don't need to set it themselves.
"""
Expand Down
14 changes: 0 additions & 14 deletions tests/examples/log_parsing/test_inference.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,12 +35,6 @@
from morpheus.utils.type_aliases import DataFrameType


@pytest.fixture(name="config")
def config_fixture(config: Config):
config.mode = PipelineModes.NLP
yield config


def build_response_mem(log_test_data_dir: str) -> TensorMemory:
# we have tensor data for the first five rows
count = 5
Expand Down Expand Up @@ -113,11 +107,9 @@ def _check_worker(inference_mod: types.ModuleType, worker: TritonInferenceWorker
assert worker._inout_mapping == expected_mapping


@pytest.mark.use_python
@pytest.mark.import_mod([os.path.join(TEST_DIRS.examples_dir, 'log_parsing', 'inference.py')])
def test_log_parsing_triton_inference_log_parsing_constructor(config: Config,
import_mod: typing.List[types.ModuleType]):
config.mode = PipelineModes.NLP
inference_mod = import_mod[0]
worker = inference_mod.TritonInferenceLogParsing(inf_queue=ProducerConsumerQueue(),
c=config,
Expand All @@ -131,7 +123,6 @@ def test_log_parsing_triton_inference_log_parsing_constructor(config: Config,
_check_worker(inference_mod, worker, {'test': 'this'})


@pytest.mark.use_python
@pytest.mark.import_mod([os.path.join(TEST_DIRS.examples_dir, 'log_parsing', 'inference.py')])
@pytest.mark.parametrize("mess_offset,mess_count,offset,count", [(0, 20, 0, 20), (5, 10, 5, 10)])
def test_log_parsing_triton_inference_log_parsing_build_output_message(config: Config,
Expand Down Expand Up @@ -173,7 +164,6 @@ def test_log_parsing_triton_inference_log_parsing_build_output_message(config: C
assert msg.seq_ids.shape == (count, 3)


@pytest.mark.use_python
@pytest.mark.import_mod([os.path.join(TEST_DIRS.examples_dir, 'log_parsing', 'inference.py')])
def test_log_parsing_inference_stage_constructor(config: Config, import_mod: typing.List[types.ModuleType]):
inference_mod = import_mod[0]
Expand Down Expand Up @@ -205,7 +195,6 @@ def test_log_parsing_inference_stage_constructor(config: Config, import_mod: typ
assert stage._kwargs == expected_kwargs


@pytest.mark.use_python
@pytest.mark.import_mod([os.path.join(TEST_DIRS.examples_dir, 'log_parsing', 'inference.py')])
def test_log_parsing_inference_stage_get_inference_worker(config: Config, import_mod: typing.List[types.ModuleType]):
inference_mod = import_mod[0]
Expand All @@ -225,7 +214,6 @@ def test_log_parsing_inference_stage_get_inference_worker(config: Config, import
_check_worker(inference_mod, worker, expected_mapping)


@pytest.mark.use_python
@pytest.mark.usefixtures("manual_seed", "config")
@pytest.mark.import_mod(os.path.join(TEST_DIRS.examples_dir, 'log_parsing', 'inference.py'))
@pytest.mark.parametrize("mess_offset,mess_count,offset,count", [(0, 5, 0, 5), (5, 5, 0, 5)])
Expand All @@ -237,8 +225,6 @@ def test_log_parsing_inference_stage_convert_one_response(import_mod: typing.Lis
count):
inference_mod = import_mod

ttl_count = len(filter_probs_df)

input_res = build_response_mem(os.path.join(TEST_DIRS.tests_data_dir, 'examples/log_parsing'))

# confidences, labels & input_ids all have the same shape
Expand Down
1 change: 0 additions & 1 deletion tests/examples/log_parsing/test_log_parsing_pipe.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,6 @@ def _run_mocked_pipeline(config: Config, dataset_cudf: DatasetManager, import_mo


@pytest.mark.slow
@pytest.mark.use_python
@pytest.mark.import_mod([
os.path.join(TEST_DIRS.examples_dir, 'log_parsing', 'inference.py'),
os.path.join(TEST_DIRS.examples_dir, 'log_parsing', 'postprocessing.py')
Expand Down

0 comments on commit 1f5ae81

Please sign in to comment.