From 61eaa05521fa59dacefe4165f874d105035d3f1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Wed, 20 Aug 2025 20:59:51 +0200 Subject: [PATCH 01/20] WIP - first attempt towards automatic batch casting --- .../execution_engine/v1/compiler/entities.py | 3 +- .../v1/compiler/graph_constructor.py | 224 +++++++++++----- .../execution_data_manager/manager.py | 22 +- .../step_input_assembler.py | 28 +- .../plugin_image_producer/__init__.py | 240 ++++++++++++++++++ ...ng_scalars_to_fit_into_batch_parameters.py | 65 +++++ 6 files changed, 515 insertions(+), 67 deletions(-) create mode 100644 tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py create mode 100644 tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py diff --git a/inference/core/workflows/execution_engine/v1/compiler/entities.py b/inference/core/workflows/execution_engine/v1/compiler/entities.py index 6c9b945c6e..d49b2a4cb3 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/entities.py +++ b/inference/core/workflows/execution_engine/v1/compiler/entities.py @@ -224,6 +224,7 @@ class StepNode(ExecutionGraphNode): child_execution_branches: Dict[str, str] = field(default_factory=dict) execution_branches_impacting_inputs: Set[str] = field(default_factory=set) batch_oriented_parameters: Set[str] = field(default_factory=set) + scalar_parameters_to_be_batched: Set[str] = field(default_factory=set) step_execution_dimensionality: int = 0 def controls_flow(self) -> bool: @@ -236,7 +237,7 @@ def output_dimensionality(self) -> int: return len(self.data_lineage) def is_batch_oriented(self) -> bool: - return len(self.batch_oriented_parameters) > 0 + return len(self.batch_oriented_parameters) + len(self.scalar_parameters_to_be_batched) > 0 @dataclass(frozen=True) diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py 
b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index 176e0ab2d2..8526bfb8f6 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -159,6 +159,11 @@ def add_input_nodes_for_graph( ) data_lineage = [WORKFLOW_INPUT_BATCH_LINEAGE_ID] for _ in range(input_spec.dimensionality - 1): + # TODO: this may end up being a bug - with ability for multi-step debugging, if we will + # ever have a situation that there will be multiple step outputs with nested + # dimensionality with the same lineage, this re-construction method will + # assign a different lineage identifier, causing the inputs being non-composable in + # a single execution branch data_lineage.append(f"{uuid4()}") else: data_lineage = [] @@ -690,20 +695,44 @@ def denote_data_flow_for_step( node=node, expected_type=StepNode, ) + parsed_step_input_selectors: List[ParsedSelector] = execution_graph.nodes[node][ + PARSED_NODE_INPUT_SELECTORS_PROPERTY + ] + batch_compatibility_of_properties = retrieve_batch_compatibility_of_input_selectors( + input_selectors=parsed_step_input_selectors + ) + scalar_parameters_to_be_batched = verify_declared_batch_compatibility_against_actual_inputs( + node=node, + step_node_data=step_node_data, + input_data=input_data, + batch_compatibility_of_properties=batch_compatibility_of_properties, + ) + step_node_data.scalar_parameters_to_be_batched = scalar_parameters_to_be_batched + input_dimensionality_offsets = manifest.get_input_dimensionality_offsets() + print("input_dimensionality_offsets", input_dimensionality_offsets) + verify_step_input_dimensionality_offsets( + step_name=step_name, + input_dimensionality_offsets=input_dimensionality_offsets, + ) inputs_dimensionalities = get_inputs_dimensionalities( step_name=step_name, step_type=manifest.type, input_data=input_data, + scalar_parameters_to_be_batched=scalar_parameters_to_be_batched, + 
input_dimensionality_offsets=input_dimensionality_offsets, ) + print("inputs_dimensionalities", inputs_dimensionalities) logger.debug( f"For step: {node}, detected the following input dimensionalities: {inputs_dimensionalities}" ) parameters_with_batch_inputs = grab_parameters_defining_batch_inputs( inputs_dimensionalities=inputs_dimensionalities, ) + print("parameters_with_batch_inputs", parameters_with_batch_inputs) dimensionality_reference_property = manifest.get_dimensionality_reference_property() - input_dimensionality_offsets = manifest.get_input_dimensionality_offsets() + print("dimensionality_reference_property", dimensionality_reference_property) output_dimensionality_offset = manifest.get_output_dimensionality_offset() + print("output_dimensionality_offset", output_dimensionality_offset) verify_step_input_dimensionality_offsets( step_name=step_name, input_dimensionality_offsets=input_dimensionality_offsets, @@ -722,7 +751,12 @@ def denote_data_flow_for_step( inputs_dimensionalities=inputs_dimensionalities, dimensionality_offstes=input_dimensionality_offsets, ) - all_lineages = get_input_data_lineage(step_name=step_name, input_data=input_data) + all_lineages = get_input_data_lineage_including_auto_batch_casting( + step_name=step_name, + input_data=input_data, + scalar_parameters_to_be_batched=scalar_parameters_to_be_batched, + inputs_dimensionalities=inputs_dimensionalities, + ) verify_compatibility_of_input_data_lineage_with_control_flow_lineage( step_name=step_name, inputs_lineage=all_lineages, @@ -738,58 +772,18 @@ def denote_data_flow_for_step( output_dimensionality_offset=output_dimensionality_offset, ) ) - parsed_step_input_selectors: List[ParsedSelector] = execution_graph.nodes[node][ - PARSED_NODE_INPUT_SELECTORS_PROPERTY - ] - input_property2batch_expected = defaultdict(set) - for parsed_selector in parsed_step_input_selectors: - for reference in parsed_selector.definition.allowed_references: - input_property2batch_expected[ - 
parsed_selector.definition.property_name - ].update(reference.points_to_batch) - for property_name, input_definition in input_data.items(): - if property_name not in input_property2batch_expected: - # only values plugged vi selectors are to be validated - continue - if input_definition.is_compound_input(): - actual_input_is_batch = { - element.is_batch_oriented() - for element in input_definition.iterate_through_definitions() - } + truly_batch_parameters = parameters_with_batch_inputs.difference(scalar_parameters_to_be_batched) + if len(scalar_parameters_to_be_batched) > 0: + if len(truly_batch_parameters) > 0: + data_lineage = [WORKFLOW_INPUT_BATCH_LINEAGE_ID] else: - actual_input_is_batch = {input_definition.is_batch_oriented()} - batch_input_expected = input_property2batch_expected[property_name] - step_accepts_batch_input = step_node_data.step_manifest.accepts_batch_input() - if ( - step_accepts_batch_input - and batch_input_expected == {False} - and True in actual_input_is_batch - ): - raise ExecutionGraphStructureError( - public_message=f"Detected invalid reference plugged " - f"into property `{property_name}` of step `{node}` - the step " - f"property do not accept batch-oriented inputs, yet the input selector " - f"holds one - this indicates the problem with " - f"construction of your Workflow - usually the problem occurs when non-batch oriented " - f"step inputs are filled with outputs of batch-oriented steps or batch-oriented inputs.", - context="workflow_compilation | execution_graph_construction", - ) - if ( - step_accepts_batch_input - and batch_input_expected == {True} - and False in actual_input_is_batch - ): - raise ExecutionGraphStructureError( - public_message=f"Detected invalid reference plugged " - f"into property `{property_name}` of step `{node}` - the step " - f"property strictly requires batch-oriented inputs, yet the input selector " - f"holds non-batch oriented input - this indicates the " - f"problem with construction of your Workflow - 
usually the problem occurs when " - f"non-batch oriented step inputs are filled with outputs of non batch-oriented " - f"steps or non batch-oriented inputs.", - context="workflow_compilation | execution_graph_construction", + auto_casted_batch_min_dimensionality = min( + dim for p in scalar_parameters_to_be_batched for dim in inputs_dimensionalities[p] ) - if not parameters_with_batch_inputs: + data_lineage = [WORKFLOW_INPUT_BATCH_LINEAGE_ID] + for i in range(auto_casted_batch_min_dimensionality - 1): + data_lineage.append(f"auto-casted-dim-{i}") + elif not truly_batch_parameters: data_lineage = [] else: data_lineage = establish_batch_oriented_step_lineage( @@ -1181,6 +1175,8 @@ def verify_output_offset( output_dimensionality_offset: int, ) -> None: if not parameters_with_batch_inputs and output_dimensionality_offset != 0: + # TODO: this needs to be changed - we should take into account the params which will be + # batch auto-casted here, otherwise we will not be able to operate normally with BAC raise BlockInterfaceError( public_message=f"Block defining step {step_name} defines dimensionality offset different " f"than zero while taking only non-batch parameters, which is not allowed.", @@ -1403,10 +1399,36 @@ def get_batch_lineage_prefixes(lineage: List[str]) -> List[List[str]]: def get_inputs_dimensionalities( - step_name: str, step_type: str, input_data: StepInputData + step_name: str, + step_type: str, + input_data: StepInputData, + scalar_parameters_to_be_batched: Set[str], + input_dimensionality_offsets: Dict[str, int], ) -> Dict[str, Set[int]]: result = defaultdict(set) dimensionalities_spotted = set() + offset_parameters = {parameter: value for parameter,value in input_dimensionality_offsets.items() if value > 0} + non_offset_parameters_dimensionality = { + property_name: input_definition.get_dimensionality() + for property_name, input_definition in input_data.items() + if input_definition.is_batch_oriented() and property_name not in offset_parameters + } + 
non_offset_parameters_dimensionality_values = set(non_offset_parameters_dimensionality.values()) + if len(non_offset_parameters_dimensionality_values) > 1: + raise StepInputDimensionalityError( + public_message=f"For step {step_name} attempted to plug input data that are in different dimensions, " + f"whereas block defines the inputs to be equal in that terms. Problematic properties and " + f"their dimensionalities: {non_offset_parameters_dimensionality}", + context="workflow_compilation | execution_graph_construction | collecting_step_input_data", + blocks_errors=[ + WorkflowBlockError( + block_id=step_name, + block_type=step_type, + block_details=f"Dimensionality of input parameters differs by more than 1. Detected dimensions: {dict(result)}", + ) + ], + ) + non_offset_parameters_dimensionality_value = non_offset_parameters_dimensionality_values.pop() if len(non_offset_parameters_dimensionality_values) > 0 else 1 for property_name, input_definition in input_data.items(): if input_definition.is_compound_input(): result[property_name] = get_compound_input_dimensionality( @@ -1414,9 +1436,18 @@ def get_inputs_dimensionalities( step_type=step_type, property_name=property_name, input_definition=input_definition, + scalar_parameters_to_be_batched=scalar_parameters_to_be_batched, + non_offset_parameters_dimensionality_value=non_offset_parameters_dimensionality_value, + offset_parameters=offset_parameters, ) else: - result[property_name] = {input_definition.get_dimensionality()} + if property_name in scalar_parameters_to_be_batched: + if property_name not in offset_parameters: + result[property_name] = {non_offset_parameters_dimensionality_value} + else: + result[property_name] = non_offset_parameters_dimensionality_value + offset_parameters[property_name] + else: + result[property_name] = {input_definition.get_dimensionality()} dimensionalities_spotted.update(result[property_name]) non_zero_dimensionalities_spotted = {d for d in dimensionalities_spotted if d != 0} if 
len(non_zero_dimensionalities_spotted) > 0: @@ -1443,10 +1474,18 @@ def get_compound_input_dimensionality( property_name: str, step_type: str, input_definition: CompoundStepInputDefinition, + scalar_parameters_to_be_batched: Set[str], + offset_parameters: Dict[str, int], + non_offset_parameters_dimensionality_value: int, ) -> Set[int]: dimensionalities_spotted = set() for definition in input_definition.iterate_through_definitions(): - dimensionalities_spotted.add(definition.get_dimensionality()) + if property_name not in scalar_parameters_to_be_batched or definition.is_batch_oriented(): + dimensionalities_spotted.add(definition.get_dimensionality()) + elif property_name not in offset_parameters: + dimensionalities_spotted.add(non_offset_parameters_dimensionality_value) + else: + dimensionalities_spotted.add(non_offset_parameters_dimensionality_value + offset_parameters[property_name]) non_zero_dimensionalities = {e for e in dimensionalities_spotted if e != 0} if len(non_zero_dimensionalities) > 1: raise StepInputDimensionalityError( @@ -1476,12 +1515,67 @@ def grab_parameters_defining_batch_inputs( return result -def get_input_data_lineage( +def retrieve_batch_compatibility_of_input_selectors(input_selectors: List[ParsedSelector]) -> Dict[str, Set[bool]]: + batch_compatibility_of_properties = defaultdict(set) + for parsed_selector in input_selectors: + for reference in parsed_selector.definition.allowed_references: + batch_compatibility_of_properties[ + parsed_selector.definition.property_name + ].update(reference.points_to_batch) + return batch_compatibility_of_properties + + +def verify_declared_batch_compatibility_against_actual_inputs( + node: str, + step_node_data: StepNode, + input_data: StepInputData, + batch_compatibility_of_properties: Dict[str, Set[bool]], +) -> Set[str]: + scalar_parameters_to_be_batched = set() + for property_name, input_definition in input_data.items(): + if property_name not in batch_compatibility_of_properties: + # only values 
plugged via selectors are to be validated + continue + if input_definition.is_compound_input(): + actual_input_is_batch = { + element.is_batch_oriented() + for element in input_definition.iterate_through_definitions() + } + else: + actual_input_is_batch = {input_definition.is_batch_oriented()} + batch_compatibility = batch_compatibility_of_properties[property_name] + step_accepts_batch_input = step_node_data.step_manifest.accepts_batch_input() + if ( + step_accepts_batch_input + and batch_compatibility == {False} + and True in actual_input_is_batch + ): + raise ExecutionGraphStructureError( + public_message=f"Detected invalid reference plugged " + f"into property `{property_name}` of step `{node}` - the step " + f"property do not accept batch-oriented inputs, yet the input selector " + f"holds one - this indicates the problem with " + f"construction of your Workflow - usually the problem occurs when non-batch oriented " + f"step inputs are filled with outputs of batch-oriented steps or batch-oriented inputs.", + context="workflow_compilation | execution_graph_construction", + ) + if ( + step_accepts_batch_input + and batch_compatibility == {True} + and False in actual_input_is_batch + ): + scalar_parameters_to_be_batched.add(property_name) + return scalar_parameters_to_be_batched + + +def get_input_data_lineage_including_auto_batch_casting( step_name: str, input_data: StepInputData, + scalar_parameters_to_be_batched: Set[str], + inputs_dimensionalities: Dict[str, Set[int]], ) -> List[List[str]]: lineage_deduplication_set = set() - lineages = [] + property_to_lineage: Dict[str, List[List[str]]] = {} for property_name, input_definition in input_data.items(): new_lineages_detected_within_property_data = get_lineage_for_input_property( step_name=step_name, @@ -1489,11 +1583,17 @@ def get_input_data_lineage( input_definition=input_definition, lineage_deduplication_set=lineage_deduplication_set, ) - lineages.extend(new_lineages_detected_within_property_data) - if not 
lineages: - return lineages - verify_lineages(step_name=step_name, detected_lineages=lineages) - return lineages + property_to_lineage[property_name] = new_lineages_detected_within_property_data + if not property_to_lineage: + return [] + for property_name in scalar_parameters_to_be_batched: + if inputs_dimensionalities[property_name] == 1: + property_to_lineage[property_name] = [[WORKFLOW_INPUT_BATCH_LINEAGE_ID]] + else: + pass + all_lineages = [lineage for lineages in property_to_lineage.values() for lineage in lineages] + verify_lineages(step_name=step_name, detected_lineages=all_lineages) + return all_lineages def get_lineage_for_input_property( diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py index e0c7178f11..cbb204757d 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py @@ -3,7 +3,7 @@ from networkx import DiGraph from inference.core import logger -from inference.core.workflows.errors import ExecutionEngineRuntimeError +from inference.core.workflows.errors import ExecutionEngineRuntimeError, AssumptionError from inference.core.workflows.execution_engine.constants import ( NODE_COMPILATION_OUTPUT_PROPERTY, ) @@ -386,6 +386,26 @@ def get_batch_data( context="workflow_execution | step_output_registration", ) + # def should_simd_step_output_be_casted_to_scalar(self, step_selector: str) -> bool: + # if not self.is_step_simd(step_selector=step_selector): + # raise AssumptionError( + # public_message=f"Error in execution engine. Attempted to verify SIMD step output auto-casting to " + # f"scalar for step {step_selector} which is not registered as SIMD step. " + # f"This is most likely bug. 
Contact Roboflow team through github issues " + # f"(https://github.com/roboflow/inference/issues) providing full context of" + # f"the problem - including workflow definition you use.", + # context="workflow_execution | step_output_registration", + # ) + # step_node_data = node_as( + # execution_graph=self._execution_graph, + # node=step_selector, + # expected_type=StepNode, + # ) + # if not step_node_data.scalar_parameters_to_be_batched: + # return False + # a = step_node_data.output_dimensionality - step_node_data.step_execution_dimensionality + # return len(step_node_data.batch_oriented_parameters) == 0 + def is_step_simd(self, step_selector: str) -> bool: step_node_data = node_as( execution_graph=self._execution_graph, diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py index b1b4f0082f..1fbbdd39a2 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py @@ -377,6 +377,7 @@ def prepare_parameters( runtime_parameters=runtime_parameters, execution_cache=execution_cache, guard_of_indices_wrapping=guard_of_indices_wrapping, + scalar_parameters_to_be_batched=step_node.scalar_parameters_to_be_batched, ) compound_inputs.add(parameter_name) else: @@ -392,6 +393,7 @@ def prepare_parameters( runtime_parameters=runtime_parameters, execution_cache=execution_cache, guard_of_indices_wrapping=guard_of_indices_wrapping, + scalar_parameters_to_be_batched=step_node.scalar_parameters_to_be_batched, ) contains_empty_scalar_step_output_selector = ( contains_empty_scalar_step_output_selector @@ -434,6 +436,7 @@ def get_compound_parameter_value( runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, guard_of_indices_wrapping: 
GuardForIndicesWrapping, + scalar_parameters_to_be_batched: Set[str], ) -> Tuple[Union[list, Dict[str, Any]], Optional[List[DynamicBatchIndex]], bool]: contains_empty_scalar_step_output_selector = False batch_indices = [] @@ -452,6 +455,7 @@ def get_compound_parameter_value( runtime_parameters=runtime_parameters, execution_cache=execution_cache, guard_of_indices_wrapping=guard_of_indices_wrapping, + scalar_parameters_to_be_batched=scalar_parameters_to_be_batched, ) result.append(non_compound_parameter_value) contains_empty_scalar_step_output_selector = ( @@ -475,6 +479,7 @@ def get_compound_parameter_value( runtime_parameters=runtime_parameters, execution_cache=execution_cache, guard_of_indices_wrapping=guard_of_indices_wrapping, + scalar_parameters_to_be_batched=scalar_parameters_to_be_batched, ) result[nested_element.parameter_specification.nested_element_key] = ( non_compound_parameter_value @@ -500,23 +505,40 @@ def get_non_compound_parameter_value( runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, guard_of_indices_wrapping: GuardForIndicesWrapping, + scalar_parameters_to_be_batched: Set[str], ) -> Union[Any, Optional[List[DynamicBatchIndex]], bool]: if not parameter.is_batch_oriented(): + requested_as_batch = parameter.parameter_specification.parameter_name in scalar_parameters_to_be_batched if parameter.points_to_input(): input_parameter: DynamicStepInputDefinition = parameter # type: ignore parameter_name = get_last_chunk_of_selector( selector=input_parameter.selector ) - return runtime_parameters[parameter_name], None, False + if not requested_as_batch: + return runtime_parameters[parameter_name], None, False + else: + indices = [(0, )] + batched_value = Batch(content=[runtime_parameters[parameter_name]], indices=indices) + return batched_value, indices, False elif parameter.points_to_step_output(): input_parameter: DynamicStepInputDefinition = parameter # type: ignore value = execution_cache.get_non_batch_output( 
selector=input_parameter.selector ) - return value, None, value is None + if not requested_as_batch: + return value, None, value is None + else: + indices = [(0,)] + batched_value = Batch(content=[value], indices=indices) + return batched_value, indices, value is None else: static_input: StaticStepInputDefinition = parameter # type: ignore - return static_input.value, None, False + if not requested_as_batch: + return static_input.value, None, False + else: + indices = [(0,)] + batched_value = Batch(content=[static_input.value], indices=indices) + return batched_value, indices, False dynamic_parameter: DynamicStepInputDefinition = parameter # type: ignore parameter_dimensionality = dynamic_parameter.get_dimensionality() lineage_indices = dynamic_batches_manager.get_indices_for_data_lineage( diff --git a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py new file mode 100644 index 0000000000..b6d3a04a9a --- /dev/null +++ b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py @@ -0,0 +1,240 @@ +import json +from typing import Literal, List, Optional, Type, Any +from uuid import uuid4 + +import numpy as np + +from inference.core.workflows.execution_engine.entities.base import OutputDefinition, WorkflowImageData, \ + ImageParentMetadata, Batch +from inference.core.workflows.execution_engine.entities.types import IMAGE_KIND, Selector, STRING_KIND +from inference.core.workflows.prototypes.block import WorkflowBlockManifest, WorkflowBlock, BlockResult + + +class ImageProducerBlockManifest(WorkflowBlockManifest): + type: Literal["ImageProducer"] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="image", kind=[IMAGE_KIND])] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class 
ImageProducerBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return ImageProducerBlockManifest + + def run(self) -> BlockResult: + image = WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id=f"image_producer.{uuid4()}"), + numpy_image=np.zeros((192, 168, 3), dtype=np.uint8) + ) + return {"image": image} + + +class SingleImageConsumerManifest(WorkflowBlockManifest): + type: Literal["ImageConsumer"] + images: Selector(kind=[IMAGE_KIND]) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="shapes", kind=[STRING_KIND])] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] + + +class SingleImageConsumer(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return SingleImageConsumerManifest + + def run(self, images: Batch[WorkflowImageData]) -> BlockResult: + results = [] + for image in images: + results.append({"shapes": json.dumps(image.numpy_image.shape)}) + return results + + +class MultiSIMDImageConsumerManifest(WorkflowBlockManifest): + type: Literal["MultiSIMDImageConsumer"] + images_x: Selector(kind=[IMAGE_KIND]) + images_y: Selector(kind=[IMAGE_KIND]) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="metadata", kind=[STRING_KIND])] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images_x", "images_y"] + + +class MultiSIMDImageConsumer(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return MultiSIMDImageConsumerManifest + + def run(self, images_x: Batch[WorkflowImageData], images_y: Batch[WorkflowImageData]) -> BlockResult: + 
results = [] + for image_x, image_y in zip(images_x, images_y): + results.append({"shapes": json.dumps(image_x.numpy_image.shape) + json.dumps(image_y.numpy_image.shape)}) + return results + + +class MultiImageConsumerManifest(WorkflowBlockManifest): + type: Literal["MultiImageConsumer"] + images_x: Selector(kind=[IMAGE_KIND]) + images_y: Selector(kind=[IMAGE_KIND]) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="metadata", kind=[STRING_KIND])] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class MultiImageConsumer(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return MultiImageConsumerManifest + + def run(self, images_x: WorkflowImageData, images_y: WorkflowImageData) -> BlockResult: + return {"shapes": json.dumps(images_x.numpy_image.shape) + json.dumps(images_y.numpy_image.shape)} + + +class MultiImageConsumerRaisingDimManifest(WorkflowBlockManifest): + type: Literal["MultiImageConsumerRaisingDim"] + images_x: Selector(kind=[IMAGE_KIND]) + images_y: Selector(kind=[IMAGE_KIND]) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="metadata", kind=[STRING_KIND])] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset(cls) -> int: + return 1 + + +class MultiImageConsumerRaisingDim(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return MultiImageConsumerRaisingDimManifest + + def run(self, images_x: WorkflowImageData, images_y: WorkflowImageData) -> BlockResult: + return [{"shapes": json.dumps(images_x.numpy_image.shape) + json.dumps(images_y.numpy_image.shape)}] + + +class MultiSIMDImageConsumerRaisingDimManifest(WorkflowBlockManifest): + type: Literal["MultiSIMDImageConsumerRaisingDim"] + images_x: 
Selector(kind=[IMAGE_KIND]) + images_y: Selector(kind=[IMAGE_KIND]) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="metadata", kind=[STRING_KIND])] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset(cls) -> int: + return 1 + + +class MultiSIMDImageConsumerRaisingDim(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return MultiSIMDImageConsumerRaisingDimManifest + + def run(self, images_x: Batch[WorkflowImageData], images_y: Batch[WorkflowImageData]) -> BlockResult: + results = [] + for image_x, image_y in zip(images_x, images_y): + results.append([{"shapes": json.dumps(image_x.numpy_image.shape) + json.dumps(image_y.numpy_image.shape)}]) + return results + + +class IdentityManifest(WorkflowBlockManifest): + type: Literal["Identity"] + x: Selector() + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class IdentityBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return IdentityManifest + + def run(self, x: Any) -> BlockResult: + return {"x": x} + + +class IdentitySIMDManifest(WorkflowBlockManifest): + type: Literal["IdentitySIMD"] + x: Selector() + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class IdentitySIMDBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return IdentitySIMDManifest + + def run(self, x: Batch[Any]) -> BlockResult: + assert isinstance(x, Batch) + return [{"x": x_el} for x_el in x] + + +def load_blocks() -> 
List[Type[WorkflowBlock]]: + return [ + ImageProducerBlock, + SingleImageConsumer, + MultiSIMDImageConsumer, + MultiImageConsumer, + MultiImageConsumerRaisingDim, + MultiSIMDImageConsumerRaisingDim, + IdentityBlock, + IdentitySIMDBlock + ] diff --git a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py new file mode 100644 index 0000000000..b587b53849 --- /dev/null +++ b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py @@ -0,0 +1,65 @@ +from unittest import mock +from unittest.mock import MagicMock + +from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS +from inference.core.managers.base import ModelManager +from inference.core.workflows.core_steps.common.entities import StepExecutionMode +from inference.core.workflows.execution_engine.core import ExecutionEngine +from inference.core.workflows.execution_engine.introspection import blocks_loader + +WORKFLOW_PROCESSING_VIDEO_METADATA = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer", + }, + { + "type": "ImageConsumer", + "name": "image_consumer", + "images": "$steps.image_producer.image" + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_producing_image_and_consuming_it_in_block_accepting_single_batch_input( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + """ + In this test scenario, we verify compatibility of new input type (WorkflowVideoMetadata) + with Workflows compiler and execution engine. 
+ """ + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_PROCESSING_VIDEO_METADATA, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={} + ) + + # then + print(result) + raise Exception() From 0ffd261e9e87f4839c131864d82191d22ae9083a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 21 Aug 2025 10:17:02 +0200 Subject: [PATCH 02/20] WIP - first version kinda working e2e, yet not extensively tested --- .../execution_engine/v1/compiler/entities.py | 2 +- .../v1/compiler/graph_constructor.py | 61 +++++++++++++------ .../execution_engine/v1/executor/core.py | 4 ++ .../dynamic_batches_manager.py | 1 + .../execution_data_manager/execution_cache.py | 2 +- .../execution_data_manager/manager.py | 52 +++++++++++++++- .../v1/executor/output_constructor.py | 2 + 7 files changed, 102 insertions(+), 22 deletions(-) diff --git a/inference/core/workflows/execution_engine/v1/compiler/entities.py b/inference/core/workflows/execution_engine/v1/compiler/entities.py index d49b2a4cb3..afc741867f 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/entities.py +++ b/inference/core/workflows/execution_engine/v1/compiler/entities.py @@ -237,7 +237,7 @@ def output_dimensionality(self) -> int: return len(self.data_lineage) def is_batch_oriented(self) -> bool: - return len(self.batch_oriented_parameters) + len(self.scalar_parameters_to_be_batched) > 0 + return len(self.batch_oriented_parameters) > 0 @dataclass(frozen=True) diff --git 
a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index 8526bfb8f6..0a0a722e42 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -751,11 +751,10 @@ def denote_data_flow_for_step( inputs_dimensionalities=inputs_dimensionalities, dimensionality_offstes=input_dimensionality_offsets, ) - all_lineages = get_input_data_lineage_including_auto_batch_casting( + all_lineages = get_input_data_lineage_excluding_auto_batch_casting( step_name=step_name, input_data=input_data, scalar_parameters_to_be_batched=scalar_parameters_to_be_batched, - inputs_dimensionalities=inputs_dimensionalities, ) verify_compatibility_of_input_data_lineage_with_control_flow_lineage( step_name=step_name, @@ -773,17 +772,7 @@ def denote_data_flow_for_step( ) ) truly_batch_parameters = parameters_with_batch_inputs.difference(scalar_parameters_to_be_batched) - if len(scalar_parameters_to_be_batched) > 0: - if len(truly_batch_parameters) > 0: - data_lineage = [WORKFLOW_INPUT_BATCH_LINEAGE_ID] - else: - auto_casted_batch_min_dimensionality = min( - dim for p in scalar_parameters_to_be_batched for dim in inputs_dimensionalities[p] - ) - data_lineage = [WORKFLOW_INPUT_BATCH_LINEAGE_ID] - for i in range(auto_casted_batch_min_dimensionality - 1): - data_lineage.append(f"auto-casted-dim-{i}") - elif not truly_batch_parameters: + if not truly_batch_parameters: data_lineage = [] else: data_lineage = establish_batch_oriented_step_lineage( @@ -793,6 +782,7 @@ def denote_data_flow_for_step( dimensionality_reference_property=dimensionality_reference_property, output_dimensionality_offset=output_dimensionality_offset, ) + print("Data lineage of block output", data_lineage) step_node_data.data_lineage = data_lineage return execution_graph @@ -1568,6 +1558,29 @@ def 
verify_declared_batch_compatibility_against_actual_inputs( return scalar_parameters_to_be_batched +def get_input_data_lineage_excluding_auto_batch_casting( + step_name: str, + input_data: StepInputData, + scalar_parameters_to_be_batched: Set[str], +) -> List[List[str]]: + lineage_deduplication_set = set() + lineages = [] + for property_name, input_definition in input_data.items(): + if property_name in scalar_parameters_to_be_batched: + continue + new_lineages_detected_within_property_data = get_lineage_for_input_property( + step_name=step_name, + property_name=property_name, + input_definition=input_definition, + lineage_deduplication_set=lineage_deduplication_set, + ) + lineages.extend(new_lineages_detected_within_property_data) + if not lineages: + return lineages + verify_lineages(step_name=step_name, detected_lineages=lineages) + return lineages + + def get_input_data_lineage_including_auto_batch_casting( step_name: str, input_data: StepInputData, @@ -1586,12 +1599,24 @@ def get_input_data_lineage_including_auto_batch_casting( property_to_lineage[property_name] = new_lineages_detected_within_property_data if not property_to_lineage: return [] + all_lineages = [lineage for lineages in property_to_lineage.values() for lineage in lineages if len(lineage) > 0] + if len(all_lineages): + verify_lineages(step_name=step_name, detected_lineages=all_lineages) + max_len_lineage = all_lineages[0] + for lineage in all_lineages: + if len(lineage) > len(max_len_lineage): + max_len_lineage = lineage + else: + max_len_lineage = [WORKFLOW_INPUT_BATCH_LINEAGE_ID] for property_name in scalar_parameters_to_be_batched: - if inputs_dimensionalities[property_name] == 1: - property_to_lineage[property_name] = [[WORKFLOW_INPUT_BATCH_LINEAGE_ID]] - else: - pass - all_lineages = [lineage for lineages in property_to_lineage.values() for lineage in lineages] + max_dimensionality_for_compound_property = max(inputs_dimensionalities[property_name]) + auto_casted_lineage = 
max_len_lineage[:min(len(max_len_lineage), max_dimensionality_for_compound_property)] + for i in range(max(0, max_dimensionality_for_compound_property - len(max_len_lineage))): + auto_casted_lineage.append(f"auto-casted-lineage-{len(max_len_lineage) + 1}") + lineage_id = identify_lineage(lineage=auto_casted_lineage) + if lineage_id not in lineage_deduplication_set: + lineage_deduplication_set.add(lineage_id) + all_lineages.append(auto_casted_lineage) verify_lineages(step_name=step_name, detected_lineages=all_lineages) return all_lineages diff --git a/inference/core/workflows/execution_engine/v1/executor/core.py b/inference/core/workflows/execution_engine/v1/executor/core.py index fa4e2d18f4..e3f7d54707 100644 --- a/inference/core/workflows/execution_engine/v1/executor/core.py +++ b/inference/core/workflows/execution_engine/v1/executor/core.py @@ -156,6 +156,7 @@ def run_step( execution_data_manager: ExecutionDataManager, profiler: WorkflowsProfiler, ) -> None: + print(f"{step_selector} - IS SIMD: {execution_data_manager.is_step_simd(step_selector=step_selector)}") if execution_data_manager.is_step_simd(step_selector=step_selector): return run_simd_step( step_selector=step_selector, @@ -180,6 +181,7 @@ def run_simd_step( step_name = get_last_chunk_of_selector(selector=step_selector) step_instance = workflow.steps[step_name].step step_manifest = workflow.steps[step_name].manifest + print(f"{step_selector} - accepts_batch_input: {step_manifest.accepts_batch_input()}") if step_manifest.accepts_batch_input(): return run_simd_step_in_batch_mode( step_selector=step_selector, @@ -209,6 +211,7 @@ def run_simd_step_in_batch_mode( step_input = execution_data_manager.get_simd_step_input( step_selector=step_selector, ) + print(f"step_input: {step_input}") with profiler.profile_execution_phase( name="step_code_execution", categories=["workflow_block_operation"], @@ -222,6 +225,7 @@ def run_simd_step_in_batch_mode( outputs = [] else: outputs = 
step_instance.run(**step_input.parameters) + print(f"outputs: {outputs}") with profiler.profile_execution_phase( name="step_output_registration", categories=["execution_engine_operation"], diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/dynamic_batches_manager.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/dynamic_batches_manager.py index a35938ea42..0771aa2689 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/dynamic_batches_manager.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/dynamic_batches_manager.py @@ -28,6 +28,7 @@ def init( execution_graph=execution_graph, runtime_parameters=runtime_parameters, ) + print("lineage2indices", lineage2indices) return cls(lineage2indices=lineage2indices) def __init__( diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py index 9626493462..7bf5148bd8 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py @@ -41,7 +41,7 @@ def init( expected_type=StepNode, ) step_name = node_data.step_manifest.name - compatible_with_batches = node_data.is_batch_oriented() + compatible_with_batches = node_data.output_dimensionality > 0 outputs = node_data.step_manifest.get_actual_outputs() cache.declare_step( step_name=step_name, diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py index cbb204757d..e5b4412b52 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py +++ 
b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py @@ -220,6 +220,39 @@ def register_simd_step_output( node=step_selector, expected_type=StepNode, ) + print( + f"Output data lineage: ", + step_node.data_lineage, + step_node.output_dimensionality, + step_node.step_execution_dimensionality, + ) + step_name = get_last_chunk_of_selector(selector=step_selector) + if step_node.output_dimensionality == 0: + print("COLLAPSE") + # SIMD step collapsing into scalar (can happen for auto-batch casting of parameters) + if not isinstance(outputs, list) or len(outputs) != 1: + raise ExecutionEngineRuntimeError( + public_message=f"Error in execution engine. In context of SIMD step: {step_selector} attempts to " + f"register output which should collapse into a scalar, but detected batched output " + f"with more than a single element (or incompatible output), " + f"making the operation not possible. This is most likely bug (either a block or " + f"Execution Engine is faulty). 
Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_output_registration", + ) + output = outputs[0] + if isinstance(output, FlowControl): + self._register_flow_control_output_for_non_simd_step( + step_node=step_node, + output=output, + ) + return None + self._execution_cache.register_non_batch_step_outputs( + step_name=step_name, + outputs=output, + ) + return None if ( step_node.output_dimensionality - step_node.step_execution_dimensionality ) > 0: @@ -231,7 +264,6 @@ def register_simd_step_output( lineage=step_node.data_lineage, indices=indices, ) - step_name = get_last_chunk_of_selector(selector=step_selector) if step_node.child_execution_branches: if not all(isinstance(element, FlowControl) for element in outputs): raise ExecutionEngineRuntimeError( @@ -320,7 +352,23 @@ def get_non_batch_data(self, selector: str) -> Any: return self._runtime_parameters[input_name] elif is_step_selector( selector_or_value=potential_step_selector - ) and not self.is_step_simd(step_selector=potential_step_selector): + ): + step_node_data = node_as( + execution_graph=self._execution_graph, + node=potential_step_selector, + expected_type=StepNode, + ) + if step_node_data.output_dimensionality != 0: + raise ExecutionEngineRuntimeError( + public_message=f"Error in execution engine. Attempted to get value of: {selector}, " + f"which was supposed to be registered as scalar output, but in fact Execution " + f"Engine denoted the output as batched one (with dimensionality: " + f"{step_node_data.output_dimensionality}). " + f"This is most likely bug. 
Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | getting_workflow_data", + ) step_name = get_last_chunk_of_selector(selector=potential_step_selector) if selector.endswith(".*"): return self._execution_cache.get_all_non_batch_step_outputs( diff --git a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py index 2836911fb3..7da2c224b1 100644 --- a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py +++ b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py @@ -48,6 +48,7 @@ def construct_workflow_output( batch_oriented_outputs = { output for output, indices in output_name2indices.items() if indices is not None } + print("batch_oriented_outputs", batch_oriented_outputs) kinds_of_output_nodes = { output.name: node_as( execution_graph=execution_graph, @@ -60,6 +61,7 @@ def construct_workflow_output( for output in workflow_outputs: if output.name in batch_oriented_outputs: continue + print(f"taking {output.name} from {output.selector} as non batch") data_piece = execution_data_manager.get_non_batch_data(selector=output.selector) if serialize_results: output_kind = kinds_of_output_nodes[output.name] From ed2da81176d8c8b718926f31b497e8b683e57c13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 21 Aug 2025 11:14:27 +0200 Subject: [PATCH 03/20] Fix tests --- .../v1/compiler/graph_constructor.py | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index 0a0a722e42..471bb3a86c 100644 --- 
a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -1397,18 +1397,21 @@ def get_inputs_dimensionalities( ) -> Dict[str, Set[int]]: result = defaultdict(set) dimensionalities_spotted = set() - offset_parameters = {parameter: value for parameter,value in input_dimensionality_offsets.items() if value > 0} - non_offset_parameters_dimensionality = { - property_name: input_definition.get_dimensionality() - for property_name, input_definition in input_data.items() - if input_definition.is_batch_oriented() and property_name not in offset_parameters - } - non_offset_parameters_dimensionality_values = set(non_offset_parameters_dimensionality.values()) + offset_parameters = {parameter: value for parameter, value in input_dimensionality_offsets.items() if value > 0} + non_offset_parameters_dimensionality_values = set() + for property_name, input_definition in input_data.items(): + if property_name in offset_parameters: + continue + if input_definition.is_compound_input(): + for value in input_definition.iterate_through_definitions(): + if value.is_batch_oriented(): + non_offset_parameters_dimensionality_values.add(value.get_dimensionality()) + elif input_definition.is_batch_oriented(): + non_offset_parameters_dimensionality_values.add(input_definition.get_dimensionality()) if len(non_offset_parameters_dimensionality_values) > 1: raise StepInputDimensionalityError( public_message=f"For step {step_name} attempted to plug input data that are in different dimensions, " - f"whereas block defines the inputs to be equal in that terms. 
Problematic properties and " - f"their dimensionalities: {non_offset_parameters_dimensionality}", + f"whereas block defines the inputs to be equal in that terms.", context="workflow_compilation | execution_graph_construction | collecting_step_input_data", blocks_errors=[ WorkflowBlockError( From d328c1f2c670ba0d9da2191a489158dc6a2ff880 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 21 Aug 2025 16:48:56 +0200 Subject: [PATCH 04/20] WIP - safe commit --- .../workflows/execution_engine/constants.py | 1 + .../execution_engine/v1/compiler/entities.py | 8 +- .../v1/compiler/graph_constructor.py | 118 ++-- .../execution_engine/v1/executor/core.py | 9 - .../execution_data_manager/execution_cache.py | 2 + .../execution_data_manager/manager.py | 55 +- .../step_input_assembler.py | 138 +++- .../v1/executor/output_constructor.py | 19 +- ...ilation_of_workflow_with_invalid_plugin.py | 13 +- .../plugin_image_producer/__init__.py | 78 ++- ...ng_scalars_to_fit_into_batch_parameters.py | 611 +++++++++++++++++- ...st_workflow_with_arbitrary_batch_inputs.py | 49 +- .../executor/test_output_constructor.py | 2 + 13 files changed, 981 insertions(+), 122 deletions(-) diff --git a/inference/core/workflows/execution_engine/constants.py b/inference/core/workflows/execution_engine/constants.py index aba670d20d..8e93beda09 100644 --- a/inference/core/workflows/execution_engine/constants.py +++ b/inference/core/workflows/execution_engine/constants.py @@ -2,6 +2,7 @@ PARSED_NODE_INPUT_SELECTORS_PROPERTY = "parsed_node_input_selectors" STEP_DEFINITION_PROPERTY = "definition" WORKFLOW_INPUT_BATCH_LINEAGE_ID = "" +TOP_LEVEL_LINEAGE_KEY = "top_level_lineage" IMAGE_TYPE_KEY = "type" IMAGE_VALUE_KEY = "value" ROOT_PARENT_ID_KEY = "root_parent_id" diff --git a/inference/core/workflows/execution_engine/v1/compiler/entities.py b/inference/core/workflows/execution_engine/v1/compiler/entities.py index afc741867f..f5ee3e216f 100644 --- 
a/inference/core/workflows/execution_engine/v1/compiler/entities.py +++ b/inference/core/workflows/execution_engine/v1/compiler/entities.py @@ -216,6 +216,12 @@ def iterate_through_definitions(self) -> Generator[StepInputDefinition, None, No StepInputData = Dict[str, Union[StepInputDefinition, CompoundStepInputDefinition]] +@dataclass +class AutoBatchCastingConfig: + casted_dimensionality: int + lineage_support: Optional[List[str]] + + @dataclass class StepNode(ExecutionGraphNode): step_manifest: WorkflowBlockManifest @@ -224,7 +230,7 @@ class StepNode(ExecutionGraphNode): child_execution_branches: Dict[str, str] = field(default_factory=dict) execution_branches_impacting_inputs: Set[str] = field(default_factory=set) batch_oriented_parameters: Set[str] = field(default_factory=set) - scalar_parameters_to_be_batched: Set[str] = field(default_factory=set) + auto_batch_casting_lineage_supports: Dict[str, AutoBatchCastingConfig] = field(default_factory=dict) step_execution_dimensionality: int = 0 def controls_flow(self) -> bool: diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index 471bb3a86c..12cc3946ec 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -1,7 +1,7 @@ import itertools from collections import defaultdict from copy import copy, deepcopy -from typing import Any, Dict, List, Optional, Set, Tuple, Union +from typing import Any, Dict, List, Optional, Set, Tuple, Union, Callable from uuid import uuid4 import networkx as nx @@ -22,7 +22,7 @@ from inference.core.workflows.execution_engine.constants import ( NODE_COMPILATION_OUTPUT_PROPERTY, PARSED_NODE_INPUT_SELECTORS_PROPERTY, - WORKFLOW_INPUT_BATCH_LINEAGE_ID, + WORKFLOW_INPUT_BATCH_LINEAGE_ID, TOP_LEVEL_LINEAGE_KEY, ) from inference.core.workflows.execution_engine.entities.base 
import ( InputType, @@ -60,7 +60,7 @@ StaticStepInputDefinition, StepInputData, StepInputDefinition, - StepNode, + StepNode, AutoBatchCastingConfig, ) from inference.core.workflows.execution_engine.v1.compiler.graph_traversal import ( traverse_graph_ensuring_parents_are_reached_first, @@ -600,6 +600,7 @@ def denote_data_flow_in_workflow( input_manifest=None, # this is expected never to be reached ) ) + top_level_data_lineage = set() for node in traverse_graph_ensuring_parents_are_reached_first( graph=execution_graph, start_node=super_input_node, @@ -608,8 +609,25 @@ def denote_data_flow_in_workflow( execution_graph=execution_graph, node=node, block_manifest_by_step_name=block_manifest_by_step_name, + on_top_level_lineage_denoted=lambda element: top_level_data_lineage.add(element) ) execution_graph.remove_node(super_input_node) + if len(top_level_data_lineage) > 1: + raise AssumptionError( + public_message=f"Workflow Compiler detected that the workflow contains multiple elements which create " + f"top-level data batches - for instance inputs and blocks that create batched outputs from " + f"scalar parameters. We know it sounds convoluted, but the bottom line is that this " + f"situation is known limitation of Workflows Compiler. 
" + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full " + f"context of the problem - including workflow definition you use.", + context="workflow_compilation | execution_graph_construction | verification_of_batches_sources", + ) + if len(top_level_data_lineage) > 0: + top_level_data_lineage_marker = top_level_data_lineage.pop() + else: + top_level_data_lineage_marker = None + execution_graph.graph[TOP_LEVEL_LINEAGE_KEY] = top_level_data_lineage_marker return execution_graph @@ -617,6 +635,7 @@ def denote_data_flow_for_node( execution_graph: DiGraph, node: str, block_manifest_by_step_name: Dict[str, WorkflowBlockManifest], + on_top_level_lineage_denoted: Callable[[str], None], ) -> DiGraph: if is_input_node(execution_graph=execution_graph, node=node): # everything already set there, in the previous stage of compilation @@ -637,6 +656,7 @@ def denote_data_flow_for_node( execution_graph=execution_graph, node=node, manifest=manifest, + on_top_level_lineage_denoted=on_top_level_lineage_denoted, ) if is_output_node(execution_graph=execution_graph, node=node): # output is allowed to have exactly one predecessor @@ -677,6 +697,7 @@ def denote_data_flow_for_step( execution_graph: DiGraph, node: str, manifest: WorkflowBlockManifest, + on_top_level_lineage_denoted: Callable[[str], None] ) -> DiGraph: all_control_flow_predecessors, all_non_control_flow_predecessors = ( separate_flow_control_predecessors_from_data_providers( @@ -707,7 +728,6 @@ def denote_data_flow_for_step( input_data=input_data, batch_compatibility_of_properties=batch_compatibility_of_properties, ) - step_node_data.scalar_parameters_to_be_batched = scalar_parameters_to_be_batched input_dimensionality_offsets = manifest.get_input_dimensionality_offsets() print("input_dimensionality_offsets", input_dimensionality_offsets) verify_step_input_dimensionality_offsets( @@ -773,7 +793,11 @@ def denote_data_flow_for_step( ) truly_batch_parameters = 
parameters_with_batch_inputs.difference(scalar_parameters_to_be_batched) if not truly_batch_parameters: - data_lineage = [] + if manifest.get_output_dimensionality_offset() > 0: + # brave decision to open a Pandora box + data_lineage = [node] + else: + data_lineage = [] else: data_lineage = establish_batch_oriented_step_lineage( step_selector=node, @@ -782,7 +806,16 @@ def denote_data_flow_for_step( dimensionality_reference_property=dimensionality_reference_property, output_dimensionality_offset=output_dimensionality_offset, ) + lineage_supports = get_lineage_support_for_auto_batch_casted_parameters( + input_dimensionalities=inputs_dimensionalities, + all_lineages_of_batch_parameters=all_lineages, + scalar_parameters_to_be_batched=scalar_parameters_to_be_batched + ) + step_node_data.auto_batch_casting_lineage_supports = lineage_supports + print("lineage_supports", lineage_supports) print("Data lineage of block output", data_lineage) + if data_lineage: + on_top_level_lineage_denoted(data_lineage[0]) step_node_data.data_lineage = data_lineage return execution_graph @@ -1164,12 +1197,10 @@ def verify_output_offset( dimensionality_reference_property: Optional[str], output_dimensionality_offset: int, ) -> None: - if not parameters_with_batch_inputs and output_dimensionality_offset != 0: - # TODO: this needs to be changed - we should take into account the params which will be - # batch auto-casted here, otherwise we will not be able to operate normally with BAC - raise BlockInterfaceError( - public_message=f"Block defining step {step_name} defines dimensionality offset different " - f"than zero while taking only non-batch parameters, which is not allowed.", + if not parameters_with_batch_inputs and output_dimensionality_offset < 0: + raise StepInputDimensionalityError( + public_message=f"Block defining step {step_name} defines negative dimensionality offset while only " + f"scalar inputs being provided - the block cannot run as there is no dimension to collapse.", 
context="workflow_compilation | execution_graph_construction | verification_of_output_offset", ) if ( @@ -1584,44 +1615,37 @@ def get_input_data_lineage_excluding_auto_batch_casting( return lineages -def get_input_data_lineage_including_auto_batch_casting( - step_name: str, - input_data: StepInputData, +def get_lineage_support_for_auto_batch_casted_parameters( + input_dimensionalities: Dict[str, Set[int]], scalar_parameters_to_be_batched: Set[str], - inputs_dimensionalities: Dict[str, Set[int]], -) -> List[List[str]]: - lineage_deduplication_set = set() - property_to_lineage: Dict[str, List[List[str]]] = {} - for property_name, input_definition in input_data.items(): - new_lineages_detected_within_property_data = get_lineage_for_input_property( - step_name=step_name, - property_name=property_name, - input_definition=input_definition, - lineage_deduplication_set=lineage_deduplication_set, + all_lineages_of_batch_parameters: List[List[str]], +) -> Dict[str, AutoBatchCastingConfig]: + longest_lineage_support = find_longest_lineage_support( + all_lineages_of_batch_parameters=all_lineages_of_batch_parameters, + ) + result = {} + for parameter_name in scalar_parameters_to_be_batched: + parameter_dimensionality = max(input_dimensionalities[parameter_name]) + if longest_lineage_support is None: + lineage_support = None + else: + lineage_support = longest_lineage_support[:parameter_dimensionality] + result[parameter_name] = AutoBatchCastingConfig( + casted_dimensionality=parameter_dimensionality, + lineage_support=lineage_support, ) - property_to_lineage[property_name] = new_lineages_detected_within_property_data - if not property_to_lineage: - return [] - all_lineages = [lineage for lineages in property_to_lineage.values() for lineage in lineages if len(lineage) > 0] - if len(all_lineages): - verify_lineages(step_name=step_name, detected_lineages=all_lineages) - max_len_lineage = all_lineages[0] - for lineage in all_lineages: - if len(lineage) > len(max_len_lineage): - 
max_len_lineage = lineage - else: - max_len_lineage = [WORKFLOW_INPUT_BATCH_LINEAGE_ID] - for property_name in scalar_parameters_to_be_batched: - max_dimensionality_for_compound_property = max(inputs_dimensionalities[property_name]) - auto_casted_lineage = max_len_lineage[:min(len(max_len_lineage), max_dimensionality_for_compound_property)] - for i in range(max(0, max_dimensionality_for_compound_property - len(max_len_lineage))): - auto_casted_lineage.append(f"auto-casted-lineage-{len(max_len_lineage) + 1}") - lineage_id = identify_lineage(lineage=auto_casted_lineage) - if lineage_id not in lineage_deduplication_set: - lineage_deduplication_set.add(lineage_id) - all_lineages.append(auto_casted_lineage) - verify_lineages(step_name=step_name, detected_lineages=all_lineages) - return all_lineages + print("DUMMY", result) + return result + + +def find_longest_lineage_support(all_lineages_of_batch_parameters: List[List[str]]) -> Optional[List[str]]: + longest_longest_lineage_support = [] + for lineage in all_lineages_of_batch_parameters: + if len(lineage) > len(longest_longest_lineage_support): + longest_longest_lineage_support = lineage + if len(longest_longest_lineage_support) == 0: + return None + return longest_longest_lineage_support def get_lineage_for_input_property( diff --git a/inference/core/workflows/execution_engine/v1/executor/core.py b/inference/core/workflows/execution_engine/v1/executor/core.py index e3f7d54707..042ed0f9c1 100644 --- a/inference/core/workflows/execution_engine/v1/executor/core.py +++ b/inference/core/workflows/execution_engine/v1/executor/core.py @@ -304,15 +304,6 @@ def run_non_simd_step( }, ): step_result = step_instance.run(**step_input) - if isinstance(step_result, list): - raise ExecutionEngineRuntimeError( - public_message=f"Error in execution engine. Non-SIMD step {step_name} " - f"produced list of results which is not expected. This is most likely bug. 
" - f"Contact Roboflow team through github issues " - f"(https://github.com/roboflow/inference/issues) providing full context of" - f"the problem - including workflow definition you use.", - context="workflow_execution | step_output_registration", - ) with profiler.profile_execution_phase( name="step_output_registration", categories=["execution_engine_operation"], diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py index 7bf5148bd8..7cef6ac6d9 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py @@ -87,6 +87,7 @@ def register_batch_of_step_outputs( indices: List[DynamicBatchIndex], outputs: List[Dict[str, Any]], ) -> None: + print(f"REGISTERING {step_name} - {indices} - {outputs}") if not self.step_outputs_batches(step_name=step_name): raise ExecutionEngineRuntimeError( public_message=f"Error in execution engine. 
Attempted to register batch outputs for " @@ -100,6 +101,7 @@ def register_batch_of_step_outputs( self._cache_content[step_name].register_outputs( indices=indices, outputs=outputs ) + print("VERIF", id(self._cache_content[step_name]), self._cache_content[step_name]._cache_content) self._step_outputs_registered.add(step_name) except (TypeError, AttributeError) as e: # checking this case defensively as there is no guarantee on block diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py index e5b4412b52..2605ec2524 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py @@ -121,7 +121,7 @@ def get_non_simd_step_input(self, step_selector: str) -> Optional[Dict[str, Any] ) def register_non_simd_step_output( - self, step_selector: str, output: Union[Dict[str, Any], FlowControl] + self, step_selector: str, output: Union[Dict[str, Any], FlowControl, list] ) -> None: if self.is_step_simd(step_selector=step_selector): raise ExecutionEngineRuntimeError( @@ -138,6 +138,45 @@ def register_non_simd_step_output( node=step_selector, expected_type=StepNode, ) + if step_node.output_dimensionality == 1: + # we only allow +1 dim increase for now, so it is fine to only handle this case + indices = [(i, ) for i in range(len(output))] + print(f"DIMENSIONALITY WAS JUST BORN FOR LINEAGE: {step_node.data_lineage} with indices: {indices} :)") + self._dynamic_batches_manager.register_element_indices_for_lineage( + lineage=step_node.data_lineage, + indices=indices, + ) + if step_node.child_execution_branches: + if not all(isinstance(element, FlowControl) for element in output): + raise ExecutionEngineRuntimeError( + public_message=f"Error in execution engine. 
Flow control step {step_name} " + f"expected to only produce FlowControl objects. This is most likely bug. " + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_output_registration", + ) + self._register_flow_control_output_for_simd_step( + step_node=step_node, + indices=indices, + outputs=output, + ) + return None + self._execution_cache.register_batch_of_step_outputs( + step_name=step_name, + indices=indices, + outputs=output, + ) + return None + if isinstance(output, list): + raise ExecutionEngineRuntimeError( + public_message=f"Error in execution engine. Non-SIMD step {step_name} " + f"produced list of results which is not expected. This is most likely bug. " + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_output_registration", + ) if isinstance(output, FlowControl): self._register_flow_control_output_for_non_simd_step( step_node=step_node, @@ -307,7 +346,7 @@ def get_selector_indices(self, selector: str) -> Optional[List[DynamicBatchIndex ] selector_lineage = input_node.data_lineage elif is_step_selector(selector_or_value=potential_step_selector): - if self.is_step_simd(step_selector=potential_step_selector): + if self.does_step_produce_batches(step_selector=potential_step_selector): step_node_data: StepNode = self._execution_graph.nodes[ potential_step_selector ][NODE_COMPILATION_OUTPUT_PROPERTY] @@ -321,6 +360,7 @@ def get_selector_indices(self, selector: str) -> Optional[List[DynamicBatchIndex f"the problem - including workflow definition you use.", context="workflow_execution | getting_workflow_data_indices", ) + print(f"get_selector_indices(selector={selector}): - selector_lineage: {selector_lineage}") if not 
selector_lineage: return None return self.get_lineage_indices(lineage=selector_lineage) @@ -413,13 +453,14 @@ def get_batch_data( ] elif is_step_selector( selector_or_value=potential_step_selector - ) and self.is_step_simd(step_selector=potential_step_selector): + ) and self.does_step_produce_batches(step_selector=potential_step_selector): step_name = get_last_chunk_of_selector(selector=potential_step_selector) if selector.endswith(".*"): return self._execution_cache.get_all_batch_step_outputs( step_name=step_name, batch_elements_indices=indices, ) + print(f"Getting batch results with selector: {selector} from indices: {indices}") return self._execution_cache.get_batch_output( selector=selector, batch_elements_indices=indices, @@ -462,6 +503,14 @@ def is_step_simd(self, step_selector: str) -> bool: ) return step_node_data.is_batch_oriented() + def does_step_produce_batches(self, step_selector: str) -> bool: + step_node_data = node_as( + execution_graph=self._execution_graph, + node=step_selector, + expected_type=StepNode, + ) + return step_node_data.output_dimensionality > 0 + def does_input_represent_batch(self, input_selector: str) -> bool: input_node = node_as( execution_graph=self._execution_graph, diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py index 1fbbdd39a2..bebef66217 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py @@ -8,7 +8,7 @@ DynamicStepInputDefinition, StaticStepInputDefinition, StepInputDefinition, - StepNode, + StepNode, AutoBatchCastingConfig, ) from inference.core.workflows.execution_engine.v1.compiler.utils import ( get_last_chunk_of_selector, @@ -377,7 +377,7 @@ def prepare_parameters( 
runtime_parameters=runtime_parameters, execution_cache=execution_cache, guard_of_indices_wrapping=guard_of_indices_wrapping, - scalar_parameters_to_be_batched=step_node.scalar_parameters_to_be_batched, + auto_batch_casting_lineage_supports=step_node.auto_batch_casting_lineage_supports, ) compound_inputs.add(parameter_name) else: @@ -393,12 +393,13 @@ def prepare_parameters( runtime_parameters=runtime_parameters, execution_cache=execution_cache, guard_of_indices_wrapping=guard_of_indices_wrapping, - scalar_parameters_to_be_batched=step_node.scalar_parameters_to_be_batched, + auto_batch_casting_lineage_supports=step_node.auto_batch_casting_lineage_supports, ) contains_empty_scalar_step_output_selector = ( contains_empty_scalar_step_output_selector or value_contains_empty_scalar_step_output_selector ) + print("indices_for_parameter", indices_for_parameter) batch_parameters_indices = [ i for i in indices_for_parameter.values() if i is not None ] @@ -436,7 +437,7 @@ def get_compound_parameter_value( runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, guard_of_indices_wrapping: GuardForIndicesWrapping, - scalar_parameters_to_be_batched: Set[str], + auto_batch_casting_lineage_supports: Dict[str, AutoBatchCastingConfig], ) -> Tuple[Union[list, Dict[str, Any]], Optional[List[DynamicBatchIndex]], bool]: contains_empty_scalar_step_output_selector = False batch_indices = [] @@ -455,7 +456,7 @@ def get_compound_parameter_value( runtime_parameters=runtime_parameters, execution_cache=execution_cache, guard_of_indices_wrapping=guard_of_indices_wrapping, - scalar_parameters_to_be_batched=scalar_parameters_to_be_batched, + auto_batch_casting_lineage_supports=auto_batch_casting_lineage_supports, ) result.append(non_compound_parameter_value) contains_empty_scalar_step_output_selector = ( @@ -479,7 +480,7 @@ def get_compound_parameter_value( runtime_parameters=runtime_parameters, execution_cache=execution_cache, guard_of_indices_wrapping=guard_of_indices_wrapping, - 
scalar_parameters_to_be_batched=scalar_parameters_to_be_batched, + auto_batch_casting_lineage_supports=auto_batch_casting_lineage_supports, ) result[nested_element.parameter_specification.nested_element_key] = ( non_compound_parameter_value @@ -505,10 +506,10 @@ def get_non_compound_parameter_value( runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, guard_of_indices_wrapping: GuardForIndicesWrapping, - scalar_parameters_to_be_batched: Set[str], -) -> Union[Any, Optional[List[DynamicBatchIndex]], bool]: + auto_batch_casting_lineage_supports: Dict[str, AutoBatchCastingConfig], +) -> Tuple[Any, Optional[List[DynamicBatchIndex]], bool]: if not parameter.is_batch_oriented(): - requested_as_batch = parameter.parameter_specification.parameter_name in scalar_parameters_to_be_batched + requested_as_batch = parameter.parameter_specification.parameter_name in auto_batch_casting_lineage_supports if parameter.points_to_input(): input_parameter: DynamicStepInputDefinition = parameter # type: ignore parameter_name = get_last_chunk_of_selector( @@ -517,9 +518,17 @@ def get_non_compound_parameter_value( if not requested_as_batch: return runtime_parameters[parameter_name], None, False else: - indices = [(0, )] - batched_value = Batch(content=[runtime_parameters[parameter_name]], indices=indices) - return batched_value, indices, False + return apply_auto_batch_casting( + parameter_name=parameter_name, + value=runtime_parameters[parameter_name], + auto_batch_casting_config=auto_batch_casting_lineage_supports[ + parameter.parameter_specification.parameter_name + ], + contains_empty_scalar_step_output_selector=False, + dynamic_batches_manager=dynamic_batches_manager, + step_execution_dimensionality=step_execution_dimensionality, + guard_of_indices_wrapping=guard_of_indices_wrapping, + ) elif parameter.points_to_step_output(): input_parameter: DynamicStepInputDefinition = parameter # type: ignore value = execution_cache.get_non_batch_output( @@ -528,17 +537,33 @@ def 
get_non_compound_parameter_value( if not requested_as_batch: return value, None, value is None else: - indices = [(0,)] - batched_value = Batch(content=[value], indices=indices) - return batched_value, indices, value is None + return apply_auto_batch_casting( + parameter_name=parameter.parameter_specification.parameter_name, + value=value, + auto_batch_casting_config=auto_batch_casting_lineage_supports[ + parameter.parameter_specification.parameter_name + ], + contains_empty_scalar_step_output_selector=value is None, + dynamic_batches_manager=dynamic_batches_manager, + step_execution_dimensionality=step_execution_dimensionality, + guard_of_indices_wrapping=guard_of_indices_wrapping, + ) else: static_input: StaticStepInputDefinition = parameter # type: ignore if not requested_as_batch: return static_input.value, None, False else: - indices = [(0,)] - batched_value = Batch(content=[static_input.value], indices=indices) - return batched_value, indices, False + return apply_auto_batch_casting( + parameter_name=parameter.parameter_specification.parameter_name, + value=static_input.value, + auto_batch_casting_config=auto_batch_casting_lineage_supports[ + parameter.parameter_specification.parameter_name + ], + contains_empty_scalar_step_output_selector=False, + dynamic_batches_manager=dynamic_batches_manager, + step_execution_dimensionality=step_execution_dimensionality, + guard_of_indices_wrapping=guard_of_indices_wrapping, + ) dynamic_parameter: DynamicStepInputDefinition = parameter # type: ignore parameter_dimensionality = dynamic_parameter.get_dimensionality() lineage_indices = dynamic_batches_manager.get_indices_for_data_lineage( @@ -612,6 +637,83 @@ def get_non_compound_parameter_value( return result, result.indices, False +def apply_auto_batch_casting( + parameter_name: str, + value: Any, + auto_batch_casting_config: AutoBatchCastingConfig, + contains_empty_scalar_step_output_selector: bool, + dynamic_batches_manager: DynamicBatchesManager, + 
step_execution_dimensionality: int, + guard_of_indices_wrapping: GuardForIndicesWrapping, +) -> Tuple[Any, List[DynamicBatchIndex], bool]: + print(f"parameter_name: {parameter_name} - auto_batch_casting_config: {auto_batch_casting_config}") + if auto_batch_casting_config.lineage_support is None: + indices = [(0, ) * auto_batch_casting_config.casted_dimensionality] + else: + indices = dynamic_batches_manager.get_indices_for_data_lineage( + lineage=auto_batch_casting_config.lineage_support, + ) + missing_dimensions = auto_batch_casting_config.casted_dimensionality - len( + auto_batch_casting_config.lineage_support + ) + if missing_dimensions > 0: + padding = (0, ) * missing_dimensions + indices = [i + padding for i in indices] + batch_content = [value] * len(indices) + created_batch = Batch(content=batch_content, indices=indices) + if step_execution_dimensionality == auto_batch_casting_config.casted_dimensionality: + return created_batch, indices, contains_empty_scalar_step_output_selector + if step_execution_dimensionality > auto_batch_casting_config.casted_dimensionality: + raise ExecutionEngineRuntimeError( + public_message=f"Detected a situation when parameter: " + f"{parameter_name}" + f"has auto-batch casted dimensionality {auto_batch_casting_config.casted_dimensionality} larger " + f"than step execution dimensionality: {step_execution_dimensionality}. " + f"This is most likely a bug. 
" + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_input_assembling", + ) + if abs(auto_batch_casting_config.casted_dimensionality - step_execution_dimensionality) > 1: + raise ExecutionEngineRuntimeError( + public_message=f"Detected a situation when parameter: " + f"{parameter_name} has auto batch casted " + f"dimensionality {auto_batch_casting_config.casted_dimensionality} differing more than one level " + f"from step execution dimensionality: {step_execution_dimensionality}. " + f"This is most likely a bug. " + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_input_assembling", + ) + if auto_batch_casting_config.lineage_support is None: + upper_level_indices = [indices[0][:-1]] + else: + upper_level_lineage_dimensionality = auto_batch_casting_config.casted_dimensionality - 1 + upper_level_lineage = auto_batch_casting_config.lineage_support[:upper_level_lineage_dimensionality] + if upper_level_lineage_dimensionality < 1 or len(upper_level_lineage) < upper_level_lineage_dimensionality: + raise ExecutionEngineRuntimeError( + public_message=f"Detected a situation when parameter: {parameter_name} requires dimensionality " + f"wrapping, but registered lineage support is incompatible which should be detected " + f"by the compiler. This is most likely a bug. 
" + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_input_assembling", + ) + upper_level_indices = dynamic_batches_manager.get_indices_for_data_lineage( + lineage=upper_level_lineage, + ) + result = reduce_batch_dimensionality( + indices=indices, + upper_level_index=upper_level_indices, + data=batch_content, + guard_of_indices_wrapping=guard_of_indices_wrapping, + ) + return result, result.indices, contains_empty_scalar_step_output_selector + + def _flatten_batch_oriented_inputs( inputs: list, dimensionality: int, diff --git a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py index 7da2c224b1..dacaacab7d 100644 --- a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py +++ b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py @@ -11,7 +11,7 @@ ) from inference.core.workflows.errors import AssumptionError, ExecutionEngineRuntimeError from inference.core.workflows.execution_engine.constants import ( - WORKFLOW_INPUT_BATCH_LINEAGE_ID, + WORKFLOW_INPUT_BATCH_LINEAGE_ID, TOP_LEVEL_LINEAGE_KEY, ) from inference.core.workflows.execution_engine.entities.base import ( CoordinatesSystem, @@ -45,6 +45,7 @@ def construct_workflow_output( output_name2indices[output.name] = execution_data_manager.get_selector_indices( selector=output.selector ) + print("output_name2indices", output_name2indices) batch_oriented_outputs = { output for output, indices in output_name2indices.items() if indices is not None } @@ -93,11 +94,15 @@ def construct_workflow_output( for output in workflow_outputs if output.coordinates_system is CoordinatesSystem.PARENT } - major_batch_size = len( - execution_data_manager.get_lineage_indices( - lineage=[WORKFLOW_INPUT_BATCH_LINEAGE_ID] + 
top_level_data_lineage_marker = execution_graph.graph.get(TOP_LEVEL_LINEAGE_KEY) + if top_level_data_lineage_marker: + major_batch_size = len( + execution_data_manager.get_lineage_indices( + lineage=[top_level_data_lineage_marker] + ) ) - ) + else: + major_batch_size = 0 for name in batch_oriented_outputs: array = outputs_arrays[name] indices = output_name2indices[name] @@ -105,6 +110,8 @@ def construct_workflow_output( selector=name2selector[name], indices=indices, ) + print(f"Retrieved data for {name} - {data}") + print(f"output array: {array}") for index, data_piece in zip(indices, data): if ( name in outputs_requested_in_parent_coordinates @@ -134,7 +141,9 @@ def construct_workflow_output( f"the problem - including workflow definition you use.", context="workflow_execution | output_construction", ) + print("outputs_arrays", outputs_arrays) results = [] + print("major_batch_size", major_batch_size) for i in range(major_batch_size): single_result = {} for name, value in non_batch_outputs.items(): diff --git a/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_plugin.py b/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_plugin.py index cf0f0f0f58..f0cc8ec893 100644 --- a/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_plugin.py +++ b/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_plugin.py @@ -6,6 +6,7 @@ from inference.core.managers.base import ModelManager from inference.core.workflows.core_steps.common.entities import StepExecutionMode from inference.core.workflows.errors import BlockInterfaceError +from inference.core.workflows.execution_engine.constants import TOP_LEVEL_LINEAGE_KEY from inference.core.workflows.execution_engine.introspection import blocks_loader from inference.core.workflows.execution_engine.v1.compiler.core import compile_workflow @@ -138,11 +139,13 @@ def 
test_compilation_of_workflow_where_block_is_not_simd_but_defines_output_offs } # when - with pytest.raises(BlockInterfaceError): - _ = compile_workflow( - workflow_definition=WORKFLOW_WITH_INVALID_BLOCK_DECLARING_OFFSET_BEING_NOT_SIMD, - init_parameters=workflow_init_parameters, - ) + compiled_workflow = compile_workflow( + workflow_definition=WORKFLOW_WITH_INVALID_BLOCK_DECLARING_OFFSET_BEING_NOT_SIMD, + init_parameters=workflow_init_parameters, + ) + + # then + assert compiled_workflow.execution_graph.graph[TOP_LEVEL_LINEAGE_KEY] == "$steps.problematic_dimensions" WORKFLOW_WITH_INVALID_BLOCK_DECLARING_DIMENSIONALITY_REFERENCE_PROPERTY_AS_NON_BATCH = { diff --git a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py index b6d3a04a9a..d3e3adf2a4 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py @@ -1,8 +1,9 @@ import json -from typing import Literal, List, Optional, Type, Any +from typing import Literal, List, Optional, Type, Any, Tuple from uuid import uuid4 import numpy as np +from pydantic import Field from inference.core.workflows.execution_engine.entities.base import OutputDefinition, WorkflowImageData, \ ImageParentMetadata, Batch @@ -12,6 +13,7 @@ class ImageProducerBlockManifest(WorkflowBlockManifest): type: Literal["ImageProducer"] + shape: Tuple[int, int, int] = Field(default=(192, 168, 3)) @classmethod def describe_outputs(cls) -> List[OutputDefinition]: @@ -28,10 +30,10 @@ class ImageProducerBlock(WorkflowBlock): def get_manifest(cls) -> Type[WorkflowBlockManifest]: return ImageProducerBlockManifest - def run(self) -> BlockResult: + def run(self, shape: Tuple[int, int, int]) -> BlockResult: image = WorkflowImageData( 
parent_metadata=ImageParentMetadata(parent_id=f"image_producer.{uuid4()}"), - numpy_image=np.zeros((192, 168, 3), dtype=np.uint8) + numpy_image=np.zeros(shape, dtype=np.uint8) ) return {"image": image} @@ -66,6 +68,29 @@ def run(self, images: Batch[WorkflowImageData]) -> BlockResult: return results +class SingleImageConsumerNonSIMDManifest(WorkflowBlockManifest): + type: Literal["ImageConsumerNonSIMD"] + images: Selector(kind=[IMAGE_KIND]) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="shapes", kind=[STRING_KIND])] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class SingleImageConsumerNonSIMD(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return SingleImageConsumerNonSIMDManifest + + def run(self, images: WorkflowImageData) -> BlockResult: + return {"shapes": json.dumps(images.numpy_image.shape)} + + class MultiSIMDImageConsumerManifest(WorkflowBlockManifest): type: Literal["MultiSIMDImageConsumer"] images_x: Selector(kind=[IMAGE_KIND]) @@ -93,7 +118,7 @@ def get_manifest(cls) -> Type[WorkflowBlockManifest]: def run(self, images_x: Batch[WorkflowImageData], images_y: Batch[WorkflowImageData]) -> BlockResult: results = [] for image_x, image_y in zip(images_x, images_y): - results.append({"shapes": json.dumps(image_x.numpy_image.shape) + json.dumps(image_y.numpy_image.shape)}) + results.append({"metadata": json.dumps(image_x.numpy_image.shape) + json.dumps(image_y.numpy_image.shape)}) return results @@ -104,7 +129,7 @@ class MultiImageConsumerManifest(WorkflowBlockManifest): @classmethod def describe_outputs(cls) -> List[OutputDefinition]: - return [OutputDefinition(name="metadata", kind=[STRING_KIND])] + return [OutputDefinition(name="shapes", kind=[STRING_KIND])] @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: @@ -128,7 +153,7 @@ class 
MultiImageConsumerRaisingDimManifest(WorkflowBlockManifest): @classmethod def describe_outputs(cls) -> List[OutputDefinition]: - return [OutputDefinition(name="metadata", kind=[STRING_KIND])] + return [OutputDefinition(name="shapes", kind=[STRING_KIND])] @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: @@ -156,7 +181,7 @@ class MultiSIMDImageConsumerRaisingDimManifest(WorkflowBlockManifest): @classmethod def describe_outputs(cls) -> List[OutputDefinition]: - return [OutputDefinition(name="metadata", kind=[STRING_KIND])] + return [OutputDefinition(name="shapes", kind=[STRING_KIND])] @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: @@ -180,6 +205,37 @@ def run(self, images_x: Batch[WorkflowImageData], images_y: Batch[WorkflowImageD return results +class MultiSIMDImageConsumerDecreasingDimManifest(WorkflowBlockManifest): + type: Literal["MultiSIMDImageConsumerDecreasingDim"] + images_x: Selector(kind=[IMAGE_KIND]) + images_y: Selector(kind=[IMAGE_KIND]) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="shapes", kind=[STRING_KIND])] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset(cls) -> int: + return -1 + + +class MultiSIMDImageConsumerDecreasingDim(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return MultiSIMDImageConsumerDecreasingDimManifest + + def run(self, images_x: Batch[WorkflowImageData], images_y: Batch[WorkflowImageData]) -> BlockResult: + results = [] + for image_x, image_y in zip(images_x, images_y): + results.append(json.dumps(image_x.numpy_image.shape) + json.dumps(image_y.numpy_image.shape)) + return {"shapes": "\n".join(results)} + + class IdentityManifest(WorkflowBlockManifest): type: Literal["Identity"] x: Selector() @@ -215,6 +271,10 @@ def describe_outputs(cls) -> 
List[OutputDefinition]: def get_execution_engine_compatibility(cls) -> Optional[str]: return ">=1.3.0,<2.0.0" + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["x"] + class IdentitySIMDBlock(WorkflowBlock): @@ -231,10 +291,12 @@ def load_blocks() -> List[Type[WorkflowBlock]]: return [ ImageProducerBlock, SingleImageConsumer, + SingleImageConsumerNonSIMD, MultiSIMDImageConsumer, MultiImageConsumer, MultiImageConsumerRaisingDim, MultiSIMDImageConsumerRaisingDim, IdentityBlock, - IdentitySIMDBlock + IdentitySIMDBlock, + MultiSIMDImageConsumerDecreasingDim ] diff --git a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py index b587b53849..2a88894680 100644 --- a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py +++ b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py @@ -1,13 +1,17 @@ from unittest import mock from unittest.mock import MagicMock +import numpy as np +import pytest + from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS from inference.core.managers.base import ModelManager from inference.core.workflows.core_steps.common.entities import StepExecutionMode +from inference.core.workflows.errors import StepInputDimensionalityError from inference.core.workflows.execution_engine.core import ExecutionEngine from inference.core.workflows.execution_engine.introspection import blocks_loader -WORKFLOW_PROCESSING_VIDEO_METADATA = { +WORKFLOW_IMAGE_PRODUCER_SINGLE_IMAGE_SIMD_CONSUMER = { "version": "1.1", "inputs": [], "steps": [ @@ -36,10 +40,6 @@ def test_workflow_producing_image_and_consuming_it_in_block_accepting_single_bat get_plugin_modules_mock: MagicMock, model_manager: ModelManager, ) -> None: - """ - In this test scenario, we verify 
compatibility of new input type (WorkflowVideoMetadata) - with Workflows compiler and execution engine. - """ # given get_plugin_modules_mock.return_value = [ "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" @@ -50,7 +50,60 @@ def test_workflow_producing_image_and_consuming_it_in_block_accepting_single_bat "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } execution_engine = ExecutionEngine.init( - workflow_definition=WORKFLOW_PROCESSING_VIDEO_METADATA, + workflow_definition=WORKFLOW_IMAGE_PRODUCER_SINGLE_IMAGE_SIMD_CONSUMER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={} + ) + + # then + assert result == [{"shapes": "[192, 168, 3]"}] + + +WORKFLOW_IMAGE_PRODUCER_SINGLE_IMAGE_NON_SIMD_CONSUMER = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer", + }, + { + "type": "ImageConsumerNonSIMD", + "name": "image_consumer", + "images": "$steps.image_producer.image" + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_producing_image_and_consuming_it_in_block_accepting_single_non_simd_input( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_IMAGE_PRODUCER_SINGLE_IMAGE_NON_SIMD_CONSUMER, init_parameters=workflow_init_parameters, 
max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, ) @@ -61,5 +114,547 @@ def test_workflow_producing_image_and_consuming_it_in_block_accepting_single_bat ) # then - print(result) - raise Exception() + assert result == [{"shapes": "[192, 168, 3]"}] + + +WORKFLOW_SINGLE_IMAGE_SIMD_CONSUMER_FROM_INPUT = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + ], + "steps": [ + { + "type": "ImageConsumer", + "name": "image_consumer", + "images": "$inputs.image" + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_consuming_input_image_in_block_accepting_single_non_simd_input( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_SINGLE_IMAGE_SIMD_CONSUMER_FROM_INPUT, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + image = np.zeros((240, 230, 3), dtype=np.uint8) + + # when + result = execution_engine.run( + runtime_parameters={"image": image} + ) + + # then + assert result == [{"shapes": "[240, 230, 3]"}] + + +WORKFLOW_IMAGE_PRODUCER_MULTIPLE_IMAGES_SIMD_CONSUMER = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (240, 230, 3) + }, + { + "type": "MultiSIMDImageConsumer", + "name": "image_consumer", + "images_x": "$steps.image_producer_x.image", + "images_y": 
"$steps.image_producer_y.image", + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "metadata", + "selector": "$steps.image_consumer.metadata", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_image_consumers_consuming_images_generated_by_image_producers_outputting_scalar_images( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_IMAGE_PRODUCER_MULTIPLE_IMAGES_SIMD_CONSUMER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result == [{"metadata": "[192, 168, 3][240, 230, 3]"}] + + +WORKFLOW_IMAGE_PRODUCER_AND_INPUT_IMAGES_COMBINED_WITH_MULTIPLE_IMAGES_SIMD_CONSUMER = { + "version": "1.1", + "inputs": [{"type": "WorkflowImage", "name": "image"},], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "MultiSIMDImageConsumer", + "name": "image_consumer", + "images_x": "$steps.image_producer_x.image", + "images_y": "$inputs.image", + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "metadata", + "selector": "$steps.image_consumer.metadata", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_image_consumers_consuming_images_generated_by_image_producer_and_input_images_batch( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + 
"tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_IMAGE_PRODUCER_AND_INPUT_IMAGES_COMBINED_WITH_MULTIPLE_IMAGES_SIMD_CONSUMER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + input_images = [ + np.zeros((192, 192, 3), dtype=np.uint8), + np.zeros((200, 192, 3), dtype=np.uint8), + np.zeros((300, 192, 3), dtype=np.uint8) + ] + # when + result = execution_engine.run(runtime_parameters={"image": input_images}) + + # then + assert result == [ + {"metadata": "[192, 168, 3][192, 192, 3]"}, + {"metadata": "[192, 168, 3][200, 192, 3]"}, + {"metadata": "[192, 168, 3][300, 192, 3]"} + ] + + +WORKFLOW_IMAGE_PRODUCER_AND_STEP_OUTPUT_IMAGES_COMBINED_WITH_MULTIPLE_IMAGES_SIMD_CONSUMER = { + "version": "1.1", + "inputs": [{"type": "WorkflowImage", "name": "image"},], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$inputs.image", + }, + { + "type": "MultiSIMDImageConsumer", + "name": "image_consumer", + "images_x": "$steps.image_producer_x.image", + "images_y": "$steps.identity_simd.x", + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "metadata", + "selector": "$steps.image_consumer.metadata", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_image_consumers_consuming_images_generated_by_image_producer_and_another_simd_block( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + 
"workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_IMAGE_PRODUCER_AND_STEP_OUTPUT_IMAGES_COMBINED_WITH_MULTIPLE_IMAGES_SIMD_CONSUMER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + input_images = [ + np.zeros((192, 192, 3), dtype=np.uint8), + np.zeros((200, 192, 3), dtype=np.uint8), + np.zeros((300, 192, 3), dtype=np.uint8) + ] + # when + result = execution_engine.run(runtime_parameters={"image": input_images}) + + # then + assert result == [ + {"metadata": "[192, 168, 3][192, 192, 3]"}, + {"metadata": "[192, 168, 3][200, 192, 3]"}, + {"metadata": "[192, 168, 3][300, 192, 3]"} + ] + + +WORKFLOW_WITH_SCALAR_MULTI_IMAGE_CONSUMER_FED_BY_SCALAR_PRODUCERS = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiImageConsumer", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image" + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_multiple_scalar_producers_feeding_data_into_scalar_consumer( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + 
"workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SCALAR_MULTI_IMAGE_CONSUMER_FED_BY_SCALAR_PRODUCERS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={} + ) + + # then + assert result == [{"shapes": "[192, 168, 3][220, 230, 3]"}] + + +WORKFLOW_WITH_SCALAR_MULTI_IMAGE_CONSUMER_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT = { + "version": "1.1", + "inputs": [{"type": "WorkflowImage", "name": "image"}], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiImageConsumer", + "name": "image_consumer", + "images_x": "$inputs.image", + "images_y": "$steps.image_producer_y.image" + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_scalar_producer_and_batch_input_feeding_data_into_scalar_consumer( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SCALAR_MULTI_IMAGE_CONSUMER_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + image = np.zeros((200, 400, 3), dtype=np.uint8) + + # when + result = execution_engine.run(runtime_parameters={"image": image}) + + # then + assert result == [{"shapes": "[200, 400, 3][220, 230, 3]"}] 
+ + +WORKFLOW_WITH_NON_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image" + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_multiple_scalar_producers_feeding_non_simd_consumer_raising_dim( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={} + ) + + # then + assert result == [{"shapes": "[192, 168, 3][220, 230, 3]"}] + + +WORKFLOW_WITH_NON_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT = { + "version": "1.1", + "inputs": [{"type": "WorkflowImage", "name": "image"}], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { 
+ "type": "MultiImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$inputs.image" + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_scalar_producer_and_batch_input_feeding_non_simd_consumer_raising_dim( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + image_1 = np.zeros((200, 100, 3), dtype=np.uint8) + image_2 = np.zeros((300, 100, 3), dtype=np.uint8) + + # when + result = execution_engine.run( + runtime_parameters={"image": [image_1, image_2]} + ) + + # then + assert result == [ + {"shapes": ["[192, 168, 3][200, 100, 3]"]}, + {"shapes": ["[192, 168, 3][300, 100, 3]"]}, + ] + + +WORKFLOW_WITH_NON_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiSIMDImageConsumerDecreasingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image" + } + 
], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_multiple_scalar_producers_feeding_non_simd_consumer_decreasing_dim( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + with pytest.raises(StepInputDimensionalityError): + _ = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py index 51802efefb..af22e6661d 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py @@ -793,14 +793,17 @@ def test_workflow_when_non_batch_oriented_step_feeds_batch_oriented_step_operati "workflows_core.api_key": None, "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_BATCH_ORIENTED_STEP_OPERATING_BATCH_WISE, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) # when - with pytest.raises(ExecutionGraphStructureError): - _ = ExecutionEngine.init( - 
workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_BATCH_ORIENTED_STEP_OPERATING_BATCH_WISE, - init_parameters=workflow_init_parameters, - max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, - ) + result = execution_engine.run(runtime_parameters={"non_batch_parameter": "some"}) + + # then + assert result == [{"result": 0.4}] WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_MIXED_INPUT_STEP = { @@ -1326,14 +1329,21 @@ def test_workflow_when_non_batch_oriented_step_feeds_compound_strictly_batch_ori "workflows_core.api_key": None, "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_COMPOUND_STRICTLY_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "non_batch_parameter": "some" + } + ) # then - with pytest.raises(ExecutionGraphStructureError): - _ = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_COMPOUND_STRICTLY_BATCH_ORIENTED_STEP, - init_parameters=workflow_init_parameters, - max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, - ) + assert results == [{"result": 0.4}] WORKFLOW_WITH_BATCH_ORIENTED_STEP_FEEDING_COMPOUND_NON_BATCH_ORIENTED_STEP = { @@ -1740,14 +1750,17 @@ def test_workflow_when_non_batch_oriented_input_feeds_compound_strictly_batch_or "workflows_core.api_key": None, "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_STRICTLY_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={"data": "some"}) # then - with pytest.raises(ExecutionGraphStructureError): - _ = 
ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_STRICTLY_BATCH_ORIENTED_STEP, - init_parameters=workflow_init_parameters, - max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, - ) + assert result == [{'result': 0.4}] WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_NON_BATCH_ORIENTED_STEP = { diff --git a/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py b/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py index 7b452748a2..08bce5656c 100644 --- a/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py +++ b/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py @@ -8,6 +8,7 @@ from inference.core.workflows.core_steps.loader import KINDS_SERIALIZERS from inference.core.workflows.errors import AssumptionError, ExecutionEngineRuntimeError +from inference.core.workflows.execution_engine.constants import TOP_LEVEL_LINEAGE_KEY, WORKFLOW_INPUT_BATCH_LINEAGE_ID from inference.core.workflows.execution_engine.entities.base import JsonField from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, @@ -497,6 +498,7 @@ def test_construct_workflow_output_when_batch_outputs_present() -> None: data_lookup = { "$steps.other.c": "c_value", } + execution_graph.graph[TOP_LEVEL_LINEAGE_KEY] = WORKFLOW_INPUT_BATCH_LINEAGE_ID def get_non_batch_data(selector: str) -> Any: return data_lookup[selector] From b72719cbbd1c0cabc495d4c7f8a099d5e955fd22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 22 Aug 2025 10:00:50 +0200 Subject: [PATCH 05/20] Iterate to make decrease of dimensionality work --- .../introspection/schema_parser.py | 20 ++- .../execution_engine/v1/compiler/entities.py | 4 +- .../v1/compiler/graph_constructor.py | 121 ++++++++----- .../v1/dynamic_blocks/block_assembler.py | 6 + .../execution_engine/v1/executor/core.py | 22 ++- 
.../execution_data_manager/execution_cache.py | 6 +- .../execution_data_manager/manager.py | 68 +++---- .../step_input_assembler.py | 59 +++++-- .../v1/executor/output_constructor.py | 3 +- inference/core/workflows/prototypes/block.py | 9 +- tests/benchmarks/core/test_speed_benchmark.py | 7 +- tests/common.py | 67 ++++--- tests/conftest.py | 15 +- tests/google_colab/conftest.py | 10 +- .../test_footbal_ai_functionalities.py | 29 ++- .../test_supervision_interoperability.py | 45 +++-- tests/google_colab/test_yolo_models.py | 1 - .../unit_tests/models/test_rfdetr.py | 12 +- .../unit_tests/usage_tracking/conftest.py | 2 + .../usage_tracking/test_collector.py | 13 +- .../unit_tests/http/test_client.py | 122 ++++++++----- .../unit_tests/http/utils/test_loaders.py | 16 +- .../unit_tests/http/utils/test_requests.py | 8 +- .../__init__.py | 2 +- ...ow_with_invalid_dimension_of_references.py | 11 +- ...ilation_of_workflow_with_invalid_plugin.py | 5 +- .../plugin_image_producer/__init__.py | 92 +++++++--- ...ng_scalars_to_fit_into_batch_parameters.py | 166 ++++++++++++------ ...st_workflow_with_arbitrary_batch_inputs.py | 8 +- .../test_workflow_with_perception_encoder.py | 3 + .../test_workflow_with_property_extraction.py | 29 ++- .../test_detections_classes_replacement.py | 12 +- .../foundation/test_perception_encoder.py | 9 +- .../core_steps/sinks/test_onvif_movement.py | 2 +- .../core_steps/visualizations/test_icon.py | 16 +- .../visualizations/test_icon_alpha.py | 78 ++++---- .../executor/test_output_constructor.py | 5 +- 37 files changed, 716 insertions(+), 387 deletions(-) diff --git a/inference/core/workflows/execution_engine/introspection/schema_parser.py b/inference/core/workflows/execution_engine/introspection/schema_parser.py index 022b8312bf..256a85133b 100644 --- a/inference/core/workflows/execution_engine/introspection/schema_parser.py +++ b/inference/core/workflows/execution_engine/introspection/schema_parser.py @@ -64,12 +64,16 @@ def parse_block_manifest( 
inputs_accepting_batches_and_scalars = set( manifest_type.get_parameters_accepting_batches_and_scalars() ) + inputs_enforcing_auto_batch_casting = set( + manifest_type.get_parameters_enforcing_auto_batch_casting() + ) return parse_block_manifest_schema( schema=schema, inputs_dimensionality_offsets=inputs_dimensionality_offsets, dimensionality_reference_property=dimensionality_reference_property, inputs_accepting_batches=inputs_accepting_batches, inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, + inputs_enforcing_auto_batch_casting=inputs_enforcing_auto_batch_casting, ) @@ -79,6 +83,7 @@ def parse_block_manifest_schema( dimensionality_reference_property: Optional[str], inputs_accepting_batches: Set[str], inputs_accepting_batches_and_scalars: Set[str], + inputs_enforcing_auto_batch_casting: Set[str], ) -> BlockManifestMetadata: primitive_types = retrieve_primitives_from_schema( schema=schema, @@ -89,6 +94,7 @@ def parse_block_manifest_schema( dimensionality_reference_property=dimensionality_reference_property, inputs_accepting_batches=inputs_accepting_batches, inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, + inputs_enforcing_auto_batch_casting=inputs_enforcing_auto_batch_casting, ) return BlockManifestMetadata( primitive_types=primitive_types, @@ -255,6 +261,7 @@ def retrieve_selectors_from_schema( dimensionality_reference_property: Optional[str], inputs_accepting_batches: Set[str], inputs_accepting_batches_and_scalars: Set[str], + inputs_enforcing_auto_batch_casting: Set[str], ) -> Dict[str, SelectorDefinition]: result = [] for property_name, property_definition in schema[PROPERTIES_KEY].items(): @@ -277,6 +284,7 @@ def retrieve_selectors_from_schema( is_list_element=True, inputs_accepting_batches=inputs_accepting_batches, inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, + inputs_enforcing_auto_batch_casting=inputs_enforcing_auto_batch_casting, ) elif property_definition.get(TYPE_KEY) == 
OBJECT_TYPE and isinstance( property_definition.get(ADDITIONAL_PROPERTIES_KEY), dict @@ -290,6 +298,7 @@ def retrieve_selectors_from_schema( is_dict_element=True, inputs_accepting_batches=inputs_accepting_batches, inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, + inputs_enforcing_auto_batch_casting=inputs_enforcing_auto_batch_casting, ) else: selector = retrieve_selectors_from_simple_property( @@ -300,6 +309,7 @@ def retrieve_selectors_from_schema( is_dimensionality_reference_property=is_dimensionality_reference_property, inputs_accepting_batches=inputs_accepting_batches, inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, + inputs_enforcing_auto_batch_casting=inputs_enforcing_auto_batch_casting, ) if selector is not None: result.append(selector) @@ -314,6 +324,7 @@ def retrieve_selectors_from_simple_property( is_dimensionality_reference_property: bool, inputs_accepting_batches: Set[str], inputs_accepting_batches_and_scalars: Set[str], + inputs_enforcing_auto_batch_casting: Set[str], is_list_element: bool = False, is_dict_element: bool = False, ) -> Optional[SelectorDefinition]: @@ -325,7 +336,10 @@ def retrieve_selectors_from_simple_property( if property_name in inputs_accepting_batches_and_scalars: points_to_batch = {True, False} else: - points_to_batch = {property_name in inputs_accepting_batches} + points_to_batch = { + property_name in inputs_accepting_batches + or property_name in inputs_enforcing_auto_batch_casting + } else: points_to_batch = {declared_points_to_batch} allowed_references = [ @@ -359,6 +373,7 @@ def retrieve_selectors_from_simple_property( is_dimensionality_reference_property=is_dimensionality_reference_property, inputs_accepting_batches=inputs_accepting_batches, inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, + inputs_enforcing_auto_batch_casting=inputs_enforcing_auto_batch_casting, is_list_element=True, ) if 
property_defines_union(property_definition=property_definition): @@ -372,6 +387,7 @@ def retrieve_selectors_from_simple_property( is_dimensionality_reference_property=is_dimensionality_reference_property, inputs_accepting_batches=inputs_accepting_batches, inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, + inputs_enforcing_auto_batch_casting=inputs_enforcing_auto_batch_casting, ) return None @@ -394,6 +410,7 @@ def retrieve_selectors_from_union_definition( is_dimensionality_reference_property: bool, inputs_accepting_batches: Set[str], inputs_accepting_batches_and_scalars: Set[str], + inputs_enforcing_auto_batch_casting: Set[str], ) -> Optional[SelectorDefinition]: union_types = ( union_definition.get(ANY_OF_KEY, []) @@ -410,6 +427,7 @@ def retrieve_selectors_from_union_definition( is_dimensionality_reference_property=is_dimensionality_reference_property, inputs_accepting_batches=inputs_accepting_batches, inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, + inputs_enforcing_auto_batch_casting=inputs_enforcing_auto_batch_casting, is_list_element=is_list_element, ) if result is None: diff --git a/inference/core/workflows/execution_engine/v1/compiler/entities.py b/inference/core/workflows/execution_engine/v1/compiler/entities.py index f5ee3e216f..b95fa9e9d3 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/entities.py +++ b/inference/core/workflows/execution_engine/v1/compiler/entities.py @@ -230,7 +230,9 @@ class StepNode(ExecutionGraphNode): child_execution_branches: Dict[str, str] = field(default_factory=dict) execution_branches_impacting_inputs: Set[str] = field(default_factory=set) batch_oriented_parameters: Set[str] = field(default_factory=set) - auto_batch_casting_lineage_supports: Dict[str, AutoBatchCastingConfig] = field(default_factory=dict) + auto_batch_casting_lineage_supports: Dict[str, AutoBatchCastingConfig] = field( + default_factory=dict + ) step_execution_dimensionality: int = 0 def 
controls_flow(self) -> bool: diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index 12cc3946ec..8308a62cbf 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -1,7 +1,7 @@ import itertools from collections import defaultdict from copy import copy, deepcopy -from typing import Any, Dict, List, Optional, Set, Tuple, Union, Callable +from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union from uuid import uuid4 import networkx as nx @@ -22,7 +22,8 @@ from inference.core.workflows.execution_engine.constants import ( NODE_COMPILATION_OUTPUT_PROPERTY, PARSED_NODE_INPUT_SELECTORS_PROPERTY, - WORKFLOW_INPUT_BATCH_LINEAGE_ID, TOP_LEVEL_LINEAGE_KEY, + TOP_LEVEL_LINEAGE_KEY, + WORKFLOW_INPUT_BATCH_LINEAGE_ID, ) from inference.core.workflows.execution_engine.entities.base import ( InputType, @@ -44,6 +45,7 @@ execution_phase, ) from inference.core.workflows.execution_engine.v1.compiler.entities import ( + AutoBatchCastingConfig, CompoundStepInputDefinition, DictOfStepInputDefinitions, DynamicStepInputDefinition, @@ -60,7 +62,7 @@ StaticStepInputDefinition, StepInputData, StepInputDefinition, - StepNode, AutoBatchCastingConfig, + StepNode, ) from inference.core.workflows.execution_engine.v1.compiler.graph_traversal import ( traverse_graph_ensuring_parents_are_reached_first, @@ -609,18 +611,20 @@ def denote_data_flow_in_workflow( execution_graph=execution_graph, node=node, block_manifest_by_step_name=block_manifest_by_step_name, - on_top_level_lineage_denoted=lambda element: top_level_data_lineage.add(element) + on_top_level_lineage_denoted=lambda element: top_level_data_lineage.add( + element + ), ) execution_graph.remove_node(super_input_node) if len(top_level_data_lineage) > 1: raise AssumptionError( public_message=f"Workflow 
Compiler detected that the workflow contains multiple elements which create " - f"top-level data batches - for instance inputs and blocks that create batched outputs from " - f"scalar parameters. We know it sounds convoluted, but the bottom line is that this " - f"situation is known limitation of Workflows Compiler. " - f"Contact Roboflow team through github issues " - f"(https://github.com/roboflow/inference/issues) providing full " - f"context of the problem - including workflow definition you use.", + f"top-level data batches - for instance inputs and blocks that create batched outputs from " + f"scalar parameters. We know it sounds convoluted, but the bottom line is that this " + f"situation is known limitation of Workflows Compiler. " + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full " + f"context of the problem - including workflow definition you use.", context="workflow_compilation | execution_graph_construction | verification_of_batches_sources", ) if len(top_level_data_lineage) > 0: @@ -697,7 +701,7 @@ def denote_data_flow_for_step( execution_graph: DiGraph, node: str, manifest: WorkflowBlockManifest, - on_top_level_lineage_denoted: Callable[[str], None] + on_top_level_lineage_denoted: Callable[[str], None], ) -> DiGraph: all_control_flow_predecessors, all_non_control_flow_predecessors = ( separate_flow_control_predecessors_from_data_providers( @@ -722,11 +726,13 @@ def denote_data_flow_for_step( batch_compatibility_of_properties = retrieve_batch_compatibility_of_input_selectors( input_selectors=parsed_step_input_selectors ) - scalar_parameters_to_be_batched = verify_declared_batch_compatibility_against_actual_inputs( - node=node, - step_node_data=step_node_data, - input_data=input_data, - batch_compatibility_of_properties=batch_compatibility_of_properties, + scalar_parameters_to_be_batched = ( + verify_declared_batch_compatibility_against_actual_inputs( + node=node, + 
step_node_data=step_node_data, + input_data=input_data, + batch_compatibility_of_properties=batch_compatibility_of_properties, + ) ) input_dimensionality_offsets = manifest.get_input_dimensionality_offsets() print("input_dimensionality_offsets", input_dimensionality_offsets) @@ -734,6 +740,7 @@ def denote_data_flow_for_step( step_name=step_name, input_dimensionality_offsets=input_dimensionality_offsets, ) + print("scalar_parameters_to_be_batched", scalar_parameters_to_be_batched) inputs_dimensionalities = get_inputs_dimensionalities( step_name=step_name, step_type=manifest.type, @@ -791,7 +798,9 @@ def denote_data_flow_for_step( output_dimensionality_offset=output_dimensionality_offset, ) ) - truly_batch_parameters = parameters_with_batch_inputs.difference(scalar_parameters_to_be_batched) + truly_batch_parameters = parameters_with_batch_inputs.difference( + scalar_parameters_to_be_batched + ) if not truly_batch_parameters: if manifest.get_output_dimensionality_offset() > 0: # brave decision to open a Pandora box @@ -809,7 +818,7 @@ def denote_data_flow_for_step( lineage_supports = get_lineage_support_for_auto_batch_casted_parameters( input_dimensionalities=inputs_dimensionalities, all_lineages_of_batch_parameters=all_lineages, - scalar_parameters_to_be_batched=scalar_parameters_to_be_batched + scalar_parameters_to_be_batched=scalar_parameters_to_be_batched, ) step_node_data.auto_batch_casting_lineage_supports = lineage_supports print("lineage_supports", lineage_supports) @@ -1200,7 +1209,7 @@ def verify_output_offset( if not parameters_with_batch_inputs and output_dimensionality_offset < 0: raise StepInputDimensionalityError( public_message=f"Block defining step {step_name} defines negative dimensionality offset while only " - f"scalar inputs being provided - the block cannot run as there is no dimension to collapse.", + f"scalar inputs being provided - the block cannot run as there is no dimension to collapse.", context="workflow_compilation | 
execution_graph_construction | verification_of_output_offset", ) if ( @@ -1428,7 +1437,11 @@ def get_inputs_dimensionalities( ) -> Dict[str, Set[int]]: result = defaultdict(set) dimensionalities_spotted = set() - offset_parameters = {parameter: value for parameter, value in input_dimensionality_offsets.items() if value > 0} + offset_parameters = { + parameter: value + for parameter, value in input_dimensionality_offsets.items() + if value > 0 + } non_offset_parameters_dimensionality_values = set() for property_name, input_definition in input_data.items(): if property_name in offset_parameters: @@ -1436,13 +1449,17 @@ def get_inputs_dimensionalities( if input_definition.is_compound_input(): for value in input_definition.iterate_through_definitions(): if value.is_batch_oriented(): - non_offset_parameters_dimensionality_values.add(value.get_dimensionality()) + non_offset_parameters_dimensionality_values.add( + value.get_dimensionality() + ) elif input_definition.is_batch_oriented(): - non_offset_parameters_dimensionality_values.add(input_definition.get_dimensionality()) + non_offset_parameters_dimensionality_values.add( + input_definition.get_dimensionality() + ) if len(non_offset_parameters_dimensionality_values) > 1: raise StepInputDimensionalityError( public_message=f"For step {step_name} attempted to plug input data that are in different dimensions, " - f"whereas block defines the inputs to be equal in that terms.", + f"whereas block defines the inputs to be equal in that terms.", context="workflow_compilation | execution_graph_construction | collecting_step_input_data", blocks_errors=[ WorkflowBlockError( @@ -1452,7 +1469,11 @@ def get_inputs_dimensionalities( ) ], ) - non_offset_parameters_dimensionality_value = non_offset_parameters_dimensionality_values.pop() if len(non_offset_parameters_dimensionality_values) > 0 else 1 + non_offset_parameters_dimensionality_value = ( + non_offset_parameters_dimensionality_values.pop() + if 
len(non_offset_parameters_dimensionality_values) > 0 + else 1 + ) for property_name, input_definition in input_data.items(): if input_definition.is_compound_input(): result[property_name] = get_compound_input_dimensionality( @@ -1469,7 +1490,10 @@ def get_inputs_dimensionalities( if property_name not in offset_parameters: result[property_name] = {non_offset_parameters_dimensionality_value} else: - result[property_name] = non_offset_parameters_dimensionality_value + offset_parameters[property_name] + result[property_name] = ( + non_offset_parameters_dimensionality_value + + offset_parameters[property_name] + ) else: result[property_name] = {input_definition.get_dimensionality()} dimensionalities_spotted.update(result[property_name]) @@ -1504,12 +1528,18 @@ def get_compound_input_dimensionality( ) -> Set[int]: dimensionalities_spotted = set() for definition in input_definition.iterate_through_definitions(): - if property_name not in scalar_parameters_to_be_batched or definition.is_batch_oriented(): + if ( + property_name not in scalar_parameters_to_be_batched + or definition.is_batch_oriented() + ): dimensionalities_spotted.add(definition.get_dimensionality()) elif property_name not in offset_parameters: dimensionalities_spotted.add(non_offset_parameters_dimensionality_value) else: - dimensionalities_spotted.add(non_offset_parameters_dimensionality_value + offset_parameters[property_name]) + dimensionalities_spotted.add( + non_offset_parameters_dimensionality_value + + offset_parameters[property_name] + ) non_zero_dimensionalities = {e for e in dimensionalities_spotted if e != 0} if len(non_zero_dimensionalities) > 1: raise StepInputDimensionalityError( @@ -1539,7 +1569,9 @@ def grab_parameters_defining_batch_inputs( return result -def retrieve_batch_compatibility_of_input_selectors(input_selectors: List[ParsedSelector]) -> Dict[str, Set[bool]]: +def retrieve_batch_compatibility_of_input_selectors( + input_selectors: List[ParsedSelector], +) -> Dict[str, Set[bool]]: 
batch_compatibility_of_properties = defaultdict(set) for parsed_selector in input_selectors: for reference in parsed_selector.definition.allowed_references: @@ -1583,12 +1615,11 @@ def verify_declared_batch_compatibility_against_actual_inputs( f"step inputs are filled with outputs of batch-oriented steps or batch-oriented inputs.", context="workflow_compilation | execution_graph_construction", ) - if ( - step_accepts_batch_input - and batch_compatibility == {True} - and False in actual_input_is_batch - ): + if batch_compatibility == {True} and False in actual_input_is_batch: scalar_parameters_to_be_batched.add(property_name) + print( + f"property_name: {property_name}, batch_compatibility={batch_compatibility}, actual_input_is_batch={actual_input_is_batch}, step_accepts_batch_input={step_accepts_batch_input}" + ) return scalar_parameters_to_be_batched @@ -1638,7 +1669,9 @@ def get_lineage_support_for_auto_batch_casted_parameters( return result -def find_longest_lineage_support(all_lineages_of_batch_parameters: List[List[str]]) -> Optional[List[str]]: +def find_longest_lineage_support( + all_lineages_of_batch_parameters: List[List[str]], +) -> Optional[List[str]]: longest_longest_lineage_support = [] for lineage in all_lineages_of_batch_parameters: if len(lineage) > len(longest_longest_lineage_support): @@ -1764,13 +1797,13 @@ def establish_batch_oriented_step_lineage( ) if output_dimensionality_offset < 0: result_dimensionality = reference_lineage[:output_dimensionality_offset] - if len(result_dimensionality) == 0: - raise StepOutputLineageError( - public_message=f"Step {step_selector} is to decrease dimensionality, but it is not possible if " - f"input dimensionality is not greater or equal 2, otherwise output would not " - f"be batch-oriented.", - context="workflow_compilation | execution_graph_construction | establishing_step_output_lineage", - ) + # if len(result_dimensionality) == 0: + # raise StepOutputLineageError( + # public_message=f"Step {step_selector} 
is to decrease dimensionality, but it is not possible if " + # f"input dimensionality is not greater or equal 2, otherwise output would not " + # f"be batch-oriented.", + # context="workflow_compilation | execution_graph_construction | establishing_step_output_lineage", + # ) return result_dimensionality if output_dimensionality_offset == 0: return reference_lineage @@ -1793,7 +1826,7 @@ def get_reference_lineage( f"This is most likely the bug. Contact Roboflow team through github issues " f"(https://github.com/roboflow/inference/issues) providing full " f"context of the problem - including workflow definition you use.", - context="workflow_compilation | execution_graph_construction | collecting_step_inputs", + context="workflow_compilation | execution_graph_construction | collecting_step_inputs_lineage", ) property_data = input_data[dimensionality_reference_property] if property_data.is_compound_input(): @@ -1809,7 +1842,7 @@ def get_reference_lineage( f"Contact Roboflow team through github issues " f"(https://github.com/roboflow/inference/issues) providing full " f"context of the problem - including workflow definition you use.", - context="workflow_compilation | execution_graph_construction | collecting_step_inputs", + context="workflow_compilation | execution_graph_construction | collecting_step_inputs_lineage", ) if not property_data.is_batch_oriented(): raise AssumptionError( @@ -1818,7 +1851,7 @@ def get_reference_lineage( f"Contact Roboflow team through github issues " f"(https://github.com/roboflow/inference/issues) providing full " f"context of the problem - including workflow definition you use.", - context="workflow_compilation | execution_graph_construction | collecting_step_inputs", + context="workflow_compilation | execution_graph_construction | collecting_step_inputs_lineage", ) return copy(property_data.data_lineage) diff --git a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py 
b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py index 46e06046ab..bcf4e88814 100644 --- a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py +++ b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py @@ -381,6 +381,12 @@ def assembly_manifest_class_methods( "get_parameters_accepting_batches_and_scalars", classmethod(get_parameters_accepting_batches_and_scalars), ) + get_parameters_enforcing_auto_batch_casting = lambda cls: list() + setattr( + manifest_class, + "get_parameters_enforcing_auto_batch_casting", + classmethod(get_parameters_enforcing_auto_batch_casting), + ) input_dimensionality_offsets = collect_input_dimensionality_offsets( inputs=manifest_description.inputs ) diff --git a/inference/core/workflows/execution_engine/v1/executor/core.py b/inference/core/workflows/execution_engine/v1/executor/core.py index 042ed0f9c1..fe4147d561 100644 --- a/inference/core/workflows/execution_engine/v1/executor/core.py +++ b/inference/core/workflows/execution_engine/v1/executor/core.py @@ -156,7 +156,9 @@ def run_step( execution_data_manager: ExecutionDataManager, profiler: WorkflowsProfiler, ) -> None: - print(f"{step_selector} - IS SIMD: {execution_data_manager.is_step_simd(step_selector=step_selector)}") + print( + f"{step_selector} - IS SIMD: {execution_data_manager.is_step_simd(step_selector=step_selector)}" + ) if execution_data_manager.is_step_simd(step_selector=step_selector): return run_simd_step( step_selector=step_selector, @@ -181,8 +183,22 @@ def run_simd_step( step_name = get_last_chunk_of_selector(selector=step_selector) step_instance = workflow.steps[step_name].step step_manifest = workflow.steps[step_name].manifest - print(f"{step_selector} - accepts_batch_input: {step_manifest.accepts_batch_input()}") - if step_manifest.accepts_batch_input(): + print( + f"{step_selector} - accepts_batch_input: {step_manifest.accepts_batch_input()}" + ) + 
collapse_of_batch_to_scalar_expected = ( + step_manifest.get_output_dimensionality_offset() < 0 + and not execution_data_manager.does_step_produce_batches( + step_selector=step_selector + ) + ) + print( + "collapse_of_batch_to_scalar_expected", + collapse_of_batch_to_scalar_expected, + step_manifest.get_output_dimensionality_offset(), + execution_data_manager.does_step_produce_batches(step_selector=step_selector), + ) + if step_manifest.accepts_batch_input() or collapse_of_batch_to_scalar_expected: return run_simd_step_in_batch_mode( step_selector=step_selector, step_instance=step_instance, diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py index 7cef6ac6d9..14434d1683 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py @@ -101,7 +101,11 @@ def register_batch_of_step_outputs( self._cache_content[step_name].register_outputs( indices=indices, outputs=outputs ) - print("VERIF", id(self._cache_content[step_name]), self._cache_content[step_name]._cache_content) + print( + "VERIF", + id(self._cache_content[step_name]), + self._cache_content[step_name]._cache_content, + ) self._step_outputs_registered.add(step_name) except (TypeError, AttributeError) as e: # checking this case defensively as there is no guarantee on block diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py index 2605ec2524..489cc6ca3d 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py @@ -3,7 +3,7 @@ from networkx 
import DiGraph from inference.core import logger -from inference.core.workflows.errors import ExecutionEngineRuntimeError, AssumptionError +from inference.core.workflows.errors import AssumptionError, ExecutionEngineRuntimeError from inference.core.workflows.execution_engine.constants import ( NODE_COMPILATION_OUTPUT_PROPERTY, ) @@ -140,8 +140,10 @@ def register_non_simd_step_output( ) if step_node.output_dimensionality == 1: # we only allow +1 dim increase for now, so it is fine to only handle this case - indices = [(i, ) for i in range(len(output))] - print(f"DIMENSIONALITY WAS JUST BORN FOR LINEAGE: {step_node.data_lineage} with indices: {indices} :)") + indices = [(i,) for i in range(len(output))] + print( + f"DIMENSIONALITY WAS JUST BORN FOR LINEAGE: {step_node.data_lineage} with indices: {indices} :)" + ) self._dynamic_batches_manager.register_element_indices_for_lineage( lineage=step_node.data_lineage, indices=indices, @@ -150,10 +152,10 @@ def register_non_simd_step_output( if not all(isinstance(element, FlowControl) for element in output): raise ExecutionEngineRuntimeError( public_message=f"Error in execution engine. Flow control step {step_name} " - f"expected to only produce FlowControl objects. This is most likely bug. " - f"Contact Roboflow team through github issues " - f"(https://github.com/roboflow/inference/issues) providing full context of" - f"the problem - including workflow definition you use.", + f"expected to only produce FlowControl objects. This is most likely bug. 
" + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", context="workflow_execution | step_output_registration", ) self._register_flow_control_output_for_simd_step( @@ -268,19 +270,23 @@ def register_simd_step_output( step_name = get_last_chunk_of_selector(selector=step_selector) if step_node.output_dimensionality == 0: print("COLLAPSE") + print("outputs", outputs) # SIMD step collapsing into scalar (can happen for auto-batch casting of parameters) - if not isinstance(outputs, list) or len(outputs) != 1: - raise ExecutionEngineRuntimeError( - public_message=f"Error in execution engine. In context of SIMD step: {step_selector} attempts to " - f"register output which should collapse into a scalar, but detected batched output " - f"with more than a single element (or incompatible output), " - f"making the operation not possible. This is most likely bug (either a block or " - f"Execution Engine is faulty). Contact Roboflow team through github issues " - f"(https://github.com/roboflow/inference/issues) providing full context of" - f"the problem - including workflow definition you use.", - context="workflow_execution | step_output_registration", - ) - output = outputs[0] + if isinstance(outputs, list): + if len(outputs) != 1: + raise ExecutionEngineRuntimeError( + public_message=f"Error in execution engine. In context of SIMD step: {step_selector} attempts to " + f"register output which should collapse into a scalar, but detected batched output " + f"with more than a single element (or incompatible output), " + f"making the operation not possible. This is most likely bug (either a block or " + f"Execution Engine is faulty). 
Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_output_registration", + ) + output = outputs[0] + else: + output = outputs if isinstance(output, FlowControl): self._register_flow_control_output_for_non_simd_step( step_node=step_node, @@ -360,7 +366,9 @@ def get_selector_indices(self, selector: str) -> Optional[List[DynamicBatchIndex f"the problem - including workflow definition you use.", context="workflow_execution | getting_workflow_data_indices", ) - print(f"get_selector_indices(selector={selector}): - selector_lineage: {selector_lineage}") + print( + f"get_selector_indices(selector={selector}): - selector_lineage: {selector_lineage}" + ) if not selector_lineage: return None return self.get_lineage_indices(lineage=selector_lineage) @@ -390,9 +398,7 @@ def get_non_batch_data(self, selector: str) -> Any: ) and not self.does_input_represent_batch(input_selector=selector): input_name = get_last_chunk_of_selector(selector=selector) return self._runtime_parameters[input_name] - elif is_step_selector( - selector_or_value=potential_step_selector - ): + elif is_step_selector(selector_or_value=potential_step_selector): step_node_data = node_as( execution_graph=self._execution_graph, node=potential_step_selector, @@ -401,12 +407,12 @@ def get_non_batch_data(self, selector: str) -> Any: if step_node_data.output_dimensionality != 0: raise ExecutionEngineRuntimeError( public_message=f"Error in execution engine. Attempted to get value of: {selector}, " - f"which was supposed to be registered as scalar output, but in fact Execution " - f"Engine denoted the output as batched one (with dimensionality: " - f"{step_node_data.output_dimensionality}). " - f"This is most likely bug. 
Contact Roboflow team through github issues " - f"(https://github.com/roboflow/inference/issues) providing full context of" - f"the problem - including workflow definition you use.", + f"which was supposed to be registered as scalar output, but in fact Execution " + f"Engine denoted the output as batched one (with dimensionality: " + f"{step_node_data.output_dimensionality}). " + f"This is most likely bug. Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", context="workflow_execution | getting_workflow_data", ) step_name = get_last_chunk_of_selector(selector=potential_step_selector) @@ -460,7 +466,9 @@ def get_batch_data( step_name=step_name, batch_elements_indices=indices, ) - print(f"Getting batch results with selector: {selector} from indices: {indices}") + print( + f"Getting batch results with selector: {selector} from indices: {indices}" + ) return self._execution_cache.get_batch_output( selector=selector, batch_elements_indices=indices, diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py index bebef66217..057e5dbc77 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py @@ -4,11 +4,12 @@ from inference.core.workflows.errors import AssumptionError, ExecutionEngineRuntimeError from inference.core.workflows.execution_engine.entities.base import Batch from inference.core.workflows.execution_engine.v1.compiler.entities import ( + AutoBatchCastingConfig, CompoundStepInputDefinition, DynamicStepInputDefinition, StaticStepInputDefinition, StepInputDefinition, - StepNode, AutoBatchCastingConfig, + StepNode, ) from 
inference.core.workflows.execution_engine.v1.compiler.utils import ( get_last_chunk_of_selector, @@ -358,6 +359,7 @@ def prepare_parameters( runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, ) -> BatchModeSIMDStepInput: + print("DDD", step_node.auto_batch_casting_lineage_supports) result = {} indices_for_parameter = {} guard_of_indices_wrapping = GuardForIndicesWrapping() @@ -399,7 +401,7 @@ def prepare_parameters( contains_empty_scalar_step_output_selector or value_contains_empty_scalar_step_output_selector ) - print("indices_for_parameter", indices_for_parameter) + print("indices_for_parameter", indices_for_parameter, result) batch_parameters_indices = [ i for i in indices_for_parameter.values() if i is not None ] @@ -509,7 +511,10 @@ def get_non_compound_parameter_value( auto_batch_casting_lineage_supports: Dict[str, AutoBatchCastingConfig], ) -> Tuple[Any, Optional[List[DynamicBatchIndex]], bool]: if not parameter.is_batch_oriented(): - requested_as_batch = parameter.parameter_specification.parameter_name in auto_batch_casting_lineage_supports + requested_as_batch = ( + parameter.parameter_specification.parameter_name + in auto_batch_casting_lineage_supports + ) if parameter.points_to_input(): input_parameter: DynamicStepInputDefinition = parameter # type: ignore parameter_name = get_last_chunk_of_selector( @@ -625,6 +630,8 @@ def get_non_compound_parameter_value( f"the problem - including workflow definition you use.", context="workflow_execution | step_input_assembling", ) + if step_execution_dimensionality == 0: + return Batch(batch_input, lineage_indices), lineage_indices, False upper_level_indices = dynamic_batches_manager.get_indices_for_data_lineage( lineage=dynamic_parameter.data_lineage[:-1], ) @@ -646,9 +653,11 @@ def apply_auto_batch_casting( step_execution_dimensionality: int, guard_of_indices_wrapping: GuardForIndicesWrapping, ) -> Tuple[Any, List[DynamicBatchIndex], bool]: - print(f"parameter_name: {parameter_name} - 
auto_batch_casting_config: {auto_batch_casting_config}") + print( + f"parameter_name: {parameter_name} - auto_batch_casting_config: {auto_batch_casting_config}" + ) if auto_batch_casting_config.lineage_support is None: - indices = [(0, ) * auto_batch_casting_config.casted_dimensionality] + indices = [(0,) * auto_batch_casting_config.casted_dimensionality] else: indices = dynamic_batches_manager.get_indices_for_data_lineage( lineage=auto_batch_casting_config.lineage_support, @@ -657,7 +666,7 @@ def apply_auto_batch_casting( auto_batch_casting_config.lineage_support ) if missing_dimensions > 0: - padding = (0, ) * missing_dimensions + padding = (0,) * missing_dimensions indices = [i + padding for i in indices] batch_content = [value] * len(indices) created_batch = Batch(content=batch_content, indices=indices) @@ -675,7 +684,13 @@ def apply_auto_batch_casting( f"the problem - including workflow definition you use.", context="workflow_execution | step_input_assembling", ) - if abs(auto_batch_casting_config.casted_dimensionality - step_execution_dimensionality) > 1: + if ( + abs( + auto_batch_casting_config.casted_dimensionality + - step_execution_dimensionality + ) + > 1 + ): raise ExecutionEngineRuntimeError( public_message=f"Detected a situation when parameter: " f"{parameter_name} has auto batch casted " @@ -687,24 +702,36 @@ def apply_auto_batch_casting( f"the problem - including workflow definition you use.", context="workflow_execution | step_input_assembling", ) + print(f"SSSS, step_execution_dimensionality: {step_execution_dimensionality}") + upper_level_lineage_dimensionality = ( + auto_batch_casting_config.casted_dimensionality - 1 + ) + if upper_level_lineage_dimensionality == 0: + # for batch collapse into scalar + return created_batch, indices, contains_empty_scalar_step_output_selector if auto_batch_casting_config.lineage_support is None: upper_level_indices = [indices[0][:-1]] else: - upper_level_lineage_dimensionality = 
auto_batch_casting_config.casted_dimensionality - 1 - upper_level_lineage = auto_batch_casting_config.lineage_support[:upper_level_lineage_dimensionality] - if upper_level_lineage_dimensionality < 1 or len(upper_level_lineage) < upper_level_lineage_dimensionality: - raise ExecutionEngineRuntimeError( + upper_level_lineage = auto_batch_casting_config.lineage_support[ + :upper_level_lineage_dimensionality + ] + if ( + upper_level_lineage_dimensionality < 1 + or len(upper_level_lineage) < upper_level_lineage_dimensionality + ): + raise AssumptionError( public_message=f"Detected a situation when parameter: {parameter_name} requires dimensionality " - f"wrapping, but registered lineage support is incompatible which should be detected " - f"by the compiler. This is most likely a bug. " - f"Contact Roboflow team through github issues " - f"(https://github.com/roboflow/inference/issues) providing full context of" - f"the problem - including workflow definition you use.", + f"wrapping, but registered lineage support is incompatible which should be detected " + f"by the compiler. This is most likely a bug. 
" + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", context="workflow_execution | step_input_assembling", ) upper_level_indices = dynamic_batches_manager.get_indices_for_data_lineage( lineage=upper_level_lineage, ) + print("REDUCTION!") result = reduce_batch_dimensionality( indices=indices, upper_level_index=upper_level_indices, diff --git a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py index dacaacab7d..7ab08c9c2e 100644 --- a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py +++ b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py @@ -11,7 +11,8 @@ ) from inference.core.workflows.errors import AssumptionError, ExecutionEngineRuntimeError from inference.core.workflows.execution_engine.constants import ( - WORKFLOW_INPUT_BATCH_LINEAGE_ID, TOP_LEVEL_LINEAGE_KEY, + TOP_LEVEL_LINEAGE_KEY, + WORKFLOW_INPUT_BATCH_LINEAGE_ID, ) from inference.core.workflows.execution_engine.entities.base import ( CoordinatesSystem, diff --git a/inference/core/workflows/prototypes/block.py b/inference/core/workflows/prototypes/block.py index cdb01e783f..e14a16bca6 100644 --- a/inference/core/workflows/prototypes/block.py +++ b/inference/core/workflows/prototypes/block.py @@ -56,8 +56,9 @@ def get_output_dimensionality_offset( @classmethod def accepts_batch_input(cls) -> bool: - return len(cls.get_parameters_accepting_batches()) > 0 or len( - cls.get_parameters_accepting_batches_and_scalars() + return ( + len(cls.get_parameters_accepting_batches()) > 0 + or len(cls.get_parameters_accepting_batches_and_scalars()) > 0 ) @classmethod @@ -68,6 +69,10 @@ def get_parameters_accepting_batches(cls) -> List[str]: def get_parameters_accepting_batches_and_scalars(cls) -> List[str]: return [] + @classmethod + 
def get_parameters_enforcing_auto_batch_casting(cls) -> List[str]: + return [] + @classmethod def accepts_empty_values(cls) -> bool: return False diff --git a/tests/benchmarks/core/test_speed_benchmark.py b/tests/benchmarks/core/test_speed_benchmark.py index 1fd1cd80e9..4cf0d8fa65 100644 --- a/tests/benchmarks/core/test_speed_benchmark.py +++ b/tests/benchmarks/core/test_speed_benchmark.py @@ -14,7 +14,7 @@ def dataset_reference() -> tuple[list[np.ndarray], set[tuple[int, int]]]: # args of inference benchmark python-package-speed -m yolov8n-seg-640 -bi 10000 command args = { - "dataset_reference": "coco", + "dataset_reference": "coco", "warm_up_inferences": 10, "benchmark_inferences": 10000, "batch_size": 1, @@ -23,6 +23,7 @@ def dataset_reference() -> tuple[list[np.ndarray], set[tuple[int, int]]]: "output_location": None, } + def test_benchmark_equivalent_rfdetr(benchmark, dataset_reference): images, image_sizes = dataset_reference @@ -30,6 +31,7 @@ def test_benchmark_equivalent_rfdetr(benchmark, dataset_reference): benchmark(model.infer, images) + def test_benchmark_equivalent_yolov8n_seg(benchmark, dataset_reference): images, image_sizes = dataset_reference @@ -37,9 +39,10 @@ def test_benchmark_equivalent_yolov8n_seg(benchmark, dataset_reference): benchmark(model.infer, images) + def test_benchmark_equivalent_yolov8n(benchmark, dataset_reference): images, image_sizes = dataset_reference model = get_model(model_id="yolov8n-640", api_key=None) - benchmark(model.infer, images) \ No newline at end of file + benchmark(model.infer, images) diff --git a/tests/common.py b/tests/common.py index 157a4e6373..f66492b240 100644 --- a/tests/common.py +++ b/tests/common.py @@ -20,12 +20,14 @@ def assert_localized_predictions_match( # in that, if after sorting by confidence the predictions are not ordered the same, likely they wouldn't pass this assertion anyway # the rigid assumption there is that the smallest gap between confidences is higher than our similarity threshold - 
assert len(sv_result_prediction) == len(sv_reference_prediction), "Predictions must have the same number of detections" + assert len(sv_result_prediction) == len( + sv_reference_prediction + ), "Predictions must have the same number of detections" assert np.allclose( sv_result_prediction.xyxy, sv_reference_prediction.xyxy, - atol=box_pixel_tolerance + atol=box_pixel_tolerance, ), ( f"Bounding boxes must match with a tolerance of {box_pixel_tolerance} pixels, " f"got {sv_result_prediction.xyxy} and {sv_reference_prediction.xyxy}" @@ -35,7 +37,7 @@ def assert_localized_predictions_match( assert np.allclose( sv_result_prediction.confidence, sv_reference_prediction.confidence, - atol=box_confidence_tolerance + atol=box_confidence_tolerance, ), ( f"Confidence must match with a tolerance of {box_confidence_tolerance}, " f"got {sv_result_prediction.confidence} and {sv_reference_prediction.confidence}" @@ -43,18 +45,23 @@ def assert_localized_predictions_match( if sv_reference_prediction.class_id is not None: assert np.array_equal( - sv_result_prediction.class_id, - sv_reference_prediction.class_id - ), ( - f"Class IDs must match, got {sv_result_prediction.class_id} and {sv_reference_prediction.class_id}" - ) - + sv_result_prediction.class_id, sv_reference_prediction.class_id + ), f"Class IDs must match, got {sv_result_prediction.class_id} and {sv_reference_prediction.class_id}" + # now for keypoint and mask specific assertions if sv_reference_prediction.mask is not None: - assert sv_result_prediction.mask is not None, "Mask must be present for instance segmentation predictions" - iou = np.sum(sv_result_prediction.mask & sv_reference_prediction.mask, axis=(1, 2)) / np.sum(sv_result_prediction.mask | sv_reference_prediction.mask, axis=(1, 2)) - assert np.all(iou > mask_iou_threshold), f"Mask IOU must be greater than {mask_iou_threshold} for all predictions, got {iou}" + assert ( + sv_result_prediction.mask is not None + ), "Mask must be present for instance segmentation 
predictions" + iou = np.sum( + sv_result_prediction.mask & sv_reference_prediction.mask, axis=(1, 2) + ) / np.sum( + sv_result_prediction.mask | sv_reference_prediction.mask, axis=(1, 2) + ) + assert np.all( + iou > mask_iou_threshold + ), f"Mask IOU must be greater than {mask_iou_threshold} for all predictions, got {iou}" if all("keypoints" not in p for p in reference_prediction["predictions"]): return None @@ -62,12 +69,14 @@ def assert_localized_predictions_match( result_prediction_keypoints = sv.KeyPoints.from_inference(result_prediction) reference_prediction_keypoints = sv.KeyPoints.from_inference(reference_prediction) - assert len(result_prediction_keypoints) == len(reference_prediction_keypoints), "Keypoints must have the same number of keypoints" + assert len(result_prediction_keypoints) == len( + reference_prediction_keypoints + ), "Keypoints must have the same number of keypoints" assert np.allclose( result_prediction_keypoints.xy, reference_prediction_keypoints.xy, - atol=keypoint_pixel_tolerance + atol=keypoint_pixel_tolerance, ), ( f"Keypoints must match with a tolerance of {keypoint_pixel_tolerance} pixels, " f"got {result_prediction_keypoints.xy} and {reference_prediction_keypoints.xy}" @@ -78,7 +87,7 @@ def assert_localized_predictions_match( assert np.allclose( result_prediction_keypoints.confidence, reference_prediction_keypoints.confidence, - atol=keypoint_confidence_tolerance + atol=keypoint_confidence_tolerance, ), ( f"Keypoint confidence must match with a tolerance of {keypoint_confidence_tolerance}, " f"got {result_prediction_keypoints.confidence} and {reference_prediction_keypoints.confidence}" @@ -87,10 +96,8 @@ def assert_localized_predictions_match( if result_prediction_keypoints.class_id is not None: assert np.array_equal( result_prediction_keypoints.class_id, - reference_prediction_keypoints.class_id - ), ( - f"Keypoint class IDs must match, got {result_prediction_keypoints.class_id} and {reference_prediction_keypoints.class_id}" - ) + 
reference_prediction_keypoints.class_id, + ), f"Keypoint class IDs must match, got {result_prediction_keypoints.class_id} and {reference_prediction_keypoints.class_id}" def assert_classification_predictions_match( @@ -98,20 +105,24 @@ def assert_classification_predictions_match( reference_prediction: dict, confidence_tolerance: float = 1e-5, ) -> None: - assert type(result_prediction) == type(reference_prediction), "Predictions must be of the same type" - assert len(result_prediction["predictions"]) == len(reference_prediction["predictions"]), "Predictions must have the same number of predictions" + assert type(result_prediction) == type( + reference_prediction + ), "Predictions must be of the same type" + assert len(result_prediction["predictions"]) == len( + reference_prediction["predictions"] + ), "Predictions must have the same number of predictions" if isinstance(reference_prediction["predictions"], dict): - assert sorted(result_prediction["predicted_classes"]) == sorted(reference_prediction["predicted_classes"]), ( - f"Predicted classes must match, got {result_prediction['predicted_classes']} and {result_prediction['predicted_classes']}" - ) + assert sorted(result_prediction["predicted_classes"]) == sorted( + reference_prediction["predicted_classes"] + ), f"Predicted classes must match, got {result_prediction['predicted_classes']} and {result_prediction['predicted_classes']}" else: - assert result_prediction["top"] == reference_prediction["top"], ( - f"Top prediction must match, got {result_prediction['top']} and {reference_prediction['top']}" - ) + assert ( + result_prediction["top"] == reference_prediction["top"] + ), f"Top prediction must match, got {result_prediction['top']} and {reference_prediction['top']}" assert np.allclose( result_prediction["confidence"], reference_prediction["confidence"], - atol=confidence_tolerance + atol=confidence_tolerance, ), ( f"Confidences must match with a tolerance of {confidence_tolerance}, " f"got 
{result_prediction['confidence']} and {reference_prediction['confidence']}" diff --git a/tests/conftest.py b/tests/conftest.py index 8b2b10caac..8678455903 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -3,10 +3,21 @@ import pytest -ASSETS_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "inference", "unit_tests", "core", "interfaces", "assets")) +ASSETS_DIR = os.path.abspath( + os.path.join( + os.path.dirname(__file__), + "inference", + "unit_tests", + "core", + "interfaces", + "assets", + ) +) os.environ["TELEMETRY_OPT_OUT"] = "True" -os.environ["ONNXRUNTIME_EXECUTION_PROVIDERS"] = "[CUDAExecutionProvider,CPUExecutionProvider]" +os.environ["ONNXRUNTIME_EXECUTION_PROVIDERS"] = ( + "[CUDAExecutionProvider,CPUExecutionProvider]" +) @pytest.fixture diff --git a/tests/google_colab/conftest.py b/tests/google_colab/conftest.py index 7b1a733872..30b581c5d6 100644 --- a/tests/google_colab/conftest.py +++ b/tests/google_colab/conftest.py @@ -11,9 +11,13 @@ os.environ["ONNXRUNTIME_EXECUTION_PROVIDERS"] = "[CUDAExecutionProvider]" -REFERENCE_IMAGE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "assets", "dog.jpeg")) +REFERENCE_IMAGE_PATH = os.path.abspath( + os.path.join(os.path.dirname(__file__), "assets", "dog.jpeg") +) REFERENCE_VIDEO_URL = "https://drive.google.com/uc?id=1vVwjW1dE1drIdd4ZSILfbCGPD4weoNiu" -REFERENCE_VIDEO_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "assets", "video.mp4")) +REFERENCE_VIDEO_PATH = os.path.abspath( + os.path.join(os.path.dirname(__file__), "assets", "video.mp4") +) PLAYER_DETECTION_MODEL_ID = "football-players-detection-3zvbc/11" PLAYER_CLASS_ID = 2 FOOTBALL_FIELD_DETECTOR_MODEL_ID = "football-field-detection-f07vi/14" @@ -35,5 +39,3 @@ def reference_video() -> str: return REFERENCE_VIDEO_PATH gdown.download(REFERENCE_VIDEO_URL, REFERENCE_VIDEO_PATH) return REFERENCE_VIDEO_PATH - - diff --git a/tests/google_colab/test_footbal_ai_functionalities.py 
b/tests/google_colab/test_footbal_ai_functionalities.py index 1739c767f4..543ead07cd 100644 --- a/tests/google_colab/test_footbal_ai_functionalities.py +++ b/tests/google_colab/test_footbal_ai_functionalities.py @@ -1,16 +1,21 @@ import numpy as np import supervision as sv from inference import get_model -from tests.google_colab.conftest import PLAYER_DETECTION_MODEL_ID, PLAYER_CLASS_ID, FOOTBALL_FIELD_DETECTOR_MODEL_ID +from tests.google_colab.conftest import ( + PLAYER_DETECTION_MODEL_ID, + PLAYER_CLASS_ID, + FOOTBALL_FIELD_DETECTOR_MODEL_ID, +) -def test_cropping_players( - reference_video: str, - roboflow_api_key: str -) -> None: +def test_cropping_players(reference_video: str, roboflow_api_key: str) -> None: # given - player_detection_model = get_model(model_id=PLAYER_DETECTION_MODEL_ID, api_key=roboflow_api_key) - frame_generator = sv.get_video_frames_generator(source_path=reference_video, stride=30) + player_detection_model = get_model( + model_id=PLAYER_DETECTION_MODEL_ID, api_key=roboflow_api_key + ) + frame_generator = sv.get_video_frames_generator( + source_path=reference_video, stride=30 + ) # when crops = [] @@ -26,18 +31,12 @@ def test_cropping_players( assert len(crops) >= 470 -def test_detecting_football_field( - reference_video: str, - roboflow_api_key: str -) -> None: +def test_detecting_football_field(reference_video: str, roboflow_api_key: str) -> None: # given field_detector_model = get_model(FOOTBALL_FIELD_DETECTOR_MODEL_ID) frame_generator = sv.get_video_frames_generator(reference_video) frame = next(frame_generator) - vertex_annotator = sv.VertexAnnotator( - color=sv.Color.from_hex("#FF1493"), - radius=8 - ) + vertex_annotator = sv.VertexAnnotator(color=sv.Color.from_hex("#FF1493"), radius=8) result = field_detector_model.infer(frame, confidence=0.3)[0] key_points = sv.KeyPoints.from_inference(result) filtered_key_points = key_points.confidence[0] > 0.5 diff --git a/tests/google_colab/test_supervision_interoperability.py 
b/tests/google_colab/test_supervision_interoperability.py index 19f4aadf49..e8f04dd823 100644 --- a/tests/google_colab/test_supervision_interoperability.py +++ b/tests/google_colab/test_supervision_interoperability.py @@ -9,12 +9,12 @@ def test_basic_object_detection_visualization(reference_image: np.ndarray) -> No # given model = get_model("yolov8n-640") box_annotator = sv.BoxAnnotator( - color=sv.ColorPalette.from_hex(['#FF8C00', '#00BFFF', '#FF1493', '#FFD700']), - thickness=2 + color=sv.ColorPalette.from_hex(["#FF8C00", "#00BFFF", "#FF1493", "#FFD700"]), + thickness=2, ) label_annotator = sv.LabelAnnotator( - color=sv.ColorPalette.from_hex(['#FF8C00', '#00BFFF', '#FF1493', '#FFD700']), - text_color=sv.Color.from_hex('#000000') + color=sv.ColorPalette.from_hex(["#FF8C00", "#00BFFF", "#FF1493", "#FFD700"]), + text_color=sv.Color.from_hex("#000000"), ) # when @@ -23,17 +23,15 @@ def test_basic_object_detection_visualization(reference_image: np.ndarray) -> No annotated_frame = reference_image.copy() labels = [ f"{class_name} {confidence:.2f}" - for class_name, confidence - in zip(detections["class_name"], detections.confidence) + for class_name, confidence in zip( + detections["class_name"], detections.confidence + ) ] annotated_frame = box_annotator.annotate( - scene=annotated_frame, - detections=detections + scene=annotated_frame, detections=detections ) annotated_frame = label_annotator.annotate( - scene=annotated_frame, - detections=detections, - labels=labels + scene=annotated_frame, detections=detections, labels=labels ) # then @@ -45,11 +43,11 @@ def test_basic_instance_segmentation_visualization(reference_image: np.ndarray) # given model = get_model("yolov8n-seg-640") mask_annotator = sv.MaskAnnotator( - color=sv.ColorPalette.from_hex(['#FF8C00', '#00BFFF', '#FF1493', '#FFD700']), + color=sv.ColorPalette.from_hex(["#FF8C00", "#00BFFF", "#FF1493", "#FFD700"]), ) label_annotator = sv.LabelAnnotator( - color=sv.ColorPalette.from_hex(['#FF8C00', '#00BFFF', 
'#FF1493', '#FFD700']), - text_color=sv.Color.from_hex('#000000') + color=sv.ColorPalette.from_hex(["#FF8C00", "#00BFFF", "#FF1493", "#FFD700"]), + text_color=sv.Color.from_hex("#000000"), ) # when @@ -58,17 +56,15 @@ def test_basic_instance_segmentation_visualization(reference_image: np.ndarray) annotated_frame = reference_image.copy() labels = [ f"{class_name} {confidence:.2f}" - for class_name, confidence - in zip(detections["class_name"], detections.confidence) + for class_name, confidence in zip( + detections["class_name"], detections.confidence + ) ] annotated_frame = mask_annotator.annotate( - scene=annotated_frame, - detections=detections + scene=annotated_frame, detections=detections ) annotated_frame = label_annotator.annotate( - scene=annotated_frame, - detections=detections, - labels=labels + scene=annotated_frame, detections=detections, labels=labels ) # then @@ -87,8 +83,7 @@ def test_basic_pose_estimation_visualization(reference_image: np.ndarray) -> Non key_points = sv.KeyPoints.from_inference(result_raw) annotated_frame = reference_image.copy() annotated_frame = vertex_annotator.annotate( - scene=annotated_frame, - key_points=key_points + scene=annotated_frame, key_points=key_points ) annotated_frame = label_annotator.annotate( scene=annotated_frame, @@ -107,8 +102,8 @@ def test_basic_tracking(reference_image: np.ndarray) -> None: tracker.reset() tracker_annotator = sv.TraceAnnotator() box_annotator = sv.BoxAnnotator( - color=sv.ColorPalette.from_hex(['#FF8C00', '#00BFFF', '#FF1493', '#FFD700']), - thickness=2 + color=sv.ColorPalette.from_hex(["#FF8C00", "#00BFFF", "#FF1493", "#FFD700"]), + thickness=2, ) # when diff --git a/tests/google_colab/test_yolo_models.py b/tests/google_colab/test_yolo_models.py index e39a5a64d8..1def8ceee3 100644 --- a/tests/google_colab/test_yolo_models.py +++ b/tests/google_colab/test_yolo_models.py @@ -305,7 +305,6 @@ def test_yolov11n_instance_segmentation_inference(reference_image: np.ndarray) - assert 
len(result.mask) > 0, "At least one prediction is expected" - def test_yolov11s_instance_segmentation_inference(reference_image: np.ndarray) -> None: # given model = get_model("yolov11s-seg-640") diff --git a/tests/inference/unit_tests/models/test_rfdetr.py b/tests/inference/unit_tests/models/test_rfdetr.py index 8082150448..4ac545672c 100644 --- a/tests/inference/unit_tests/models/test_rfdetr.py +++ b/tests/inference/unit_tests/models/test_rfdetr.py @@ -80,7 +80,17 @@ def test_sigmoid_stable_mixed_values(): result = model.sigmoid_stable(x) # then - expected = np.array([0.0, 0.0000453978687024, 0.2689414213699951, 0.5, 0.7310585786300049, 0.9999546021312976, 1]) + expected = np.array( + [ + 0.0, + 0.0000453978687024, + 0.2689414213699951, + 0.5, + 0.7310585786300049, + 0.9999546021312976, + 1, + ] + ) assert np.allclose(result, expected, atol=1e-15) diff --git a/tests/inference/unit_tests/usage_tracking/conftest.py b/tests/inference/unit_tests/usage_tracking/conftest.py index 6e55576f72..052db5d3ee 100644 --- a/tests/inference/unit_tests/usage_tracking/conftest.py +++ b/tests/inference/unit_tests/usage_tracking/conftest.py @@ -11,6 +11,7 @@ def usage_collector_with_mocked_threads(): This prevents the actual threads from starting during tests. 
""" import threading + original_thread = threading.Thread original_event = threading.Event @@ -19,6 +20,7 @@ def usage_collector_with_mocked_threads(): threading.Event = MagicMock() from inference.usage_tracking import collector as collector_module + importlib.reload(collector_module) usage_collector = collector_module.usage_collector diff --git a/tests/inference/unit_tests/usage_tracking/test_collector.py b/tests/inference/unit_tests/usage_tracking/test_collector.py index b4c3712be6..c1fca79917 100644 --- a/tests/inference/unit_tests/usage_tracking/test_collector.py +++ b/tests/inference/unit_tests/usage_tracking/test_collector.py @@ -1,9 +1,9 @@ import hashlib import json import sys +from unittest import mock import pytest -from unittest import mock from inference.core.env import LAMBDA from inference.core.version import __version__ as inference_version @@ -895,7 +895,9 @@ def test_system_info_with_dedicated_deployment_id(usage_collector_with_mocked_th assert system_info[k] == v -def test_system_info_with_no_dedicated_deployment_id(usage_collector_with_mocked_threads): +def test_system_info_with_no_dedicated_deployment_id( + usage_collector_with_mocked_threads, +): # given system_info = usage_collector_with_mocked_threads.system_info( ip_address="w.x.y.z", hostname="hostname01" @@ -973,4 +975,9 @@ def test_func(api_key="test_key"): assert len(usage_collector._usage) == 1 assert "test_key" in usage_collector._usage assert "model:unknown" in usage_collector._usage["test_key"] - assert json.loads(usage_collector._usage["test_key"]["model:unknown"]["resource_details"]).get("error") == "test exception" + assert ( + json.loads( + usage_collector._usage["test_key"]["model:unknown"]["resource_details"] + ).get("error") + == "test exception" + ) diff --git a/tests/inference_sdk/unit_tests/http/test_client.py b/tests/inference_sdk/unit_tests/http/test_client.py index a38409d781..b24a72c350 100644 --- a/tests/inference_sdk/unit_tests/http/test_client.py +++ 
b/tests/inference_sdk/unit_tests/http/test_client.py @@ -2075,6 +2075,7 @@ async def test_infer_from_api_v1_async_when_request_succeed_for_object_detection "visualization": "aGVsbG8=", } + @mock.patch.object(client, "load_static_inference_input") def test_ocr_image_when_single_image_given_in_v1_mode( load_static_inference_input_mock: MagicMock, @@ -2152,7 +2153,9 @@ def test_ocr_image_when_trocr_selected_in_specific_variant( ) # when - result = http_client.ocr_image(inference_input="/some/image.jpg", model="trocr", version="trocr-small-printed") + result = http_client.ocr_image( + inference_input="/some/image.jpg", model="trocr", version="trocr-small-printed" + ) # then assert result == { @@ -2162,7 +2165,7 @@ def test_ocr_image_when_trocr_selected_in_specific_variant( assert requests_mock.request_history[0].json() == { "api_key": "my-api-key", "image": {"type": "base64", "value": "base64_image"}, - "trocr_version_id": "trocr-small-printed" + "trocr_version_id": "trocr-small-printed", }, "Request must contain API key and image encoded in standard format" @@ -2222,7 +2225,9 @@ async def test_ocr_image_async_when_trocr_selected( }, ) # when - result = await http_client.ocr_image_async(inference_input="/some/image.jpg", model="trocr") + result = await http_client.ocr_image_async( + inference_input="/some/image.jpg", model="trocr" + ) # then assert result == { @@ -2284,6 +2289,7 @@ async def test_ocr_image_async_when_trocr_selected_in_specific_variant( headers={"Content-Type": "application/json"}, ) + @mock.patch.object(client, "load_static_inference_input") def test_ocr_image_when_single_image_given_in_v0_mode( load_static_inference_input_mock: MagicMock, @@ -3656,14 +3662,13 @@ def test_infer_from_workflow_when_usage_of_profiler_enabled( # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url).configure( - inference_configuration=InferenceConfiguration(profiling_directory=empty_directory) + 
inference_configuration=InferenceConfiguration( + profiling_directory=empty_directory + ) ) requests_mock.post( f"{api_url}{endpoint_to_use}", - json={ - "outputs": [{"some": 3}], - "profiler_trace": [{"my": "trace"}] - }, + json={"outputs": [{"some": 3}], "profiler_trace": [{"my": "trace"}]}, ) load_nested_batches_of_inference_input_mock.side_effect = [ ("base64_image_1", 0.5), @@ -3707,7 +3712,9 @@ def test_infer_from_workflow_when_usage_of_profiler_enabled( }, }, "Request payload must contain api key, inputs and no cache flag" json_files_in_profiling_directory = glob(os.path.join(empty_directory, "*.json")) - assert len(json_files_in_profiling_directory) == 1, "Expected to find one JSON file with profiler trace" + assert ( + len(json_files_in_profiling_directory) == 1 + ), "Expected to find one JSON file with profiler trace" with open(json_files_in_profiling_directory[0], "r") as f: data = json.load(f) assert data == [{"my": "trace"}], "Trace content must be fully saved" @@ -3754,13 +3761,7 @@ def test_infer_from_workflow_when_nested_batch_of_inputs_provided( result = method( workspace_name="my_workspace", images={"image_1": [["1", "2"], ["3", "4", "5"], ["6"]]}, - parameters={ - "batch_oriented_param": [ - ["a", "b"], - ["c", "d", "e"], - ["f"] - ] - }, + parameters={"batch_oriented_param": [["a", "b"], ["c", "d", "e"], ["f"]]}, **{parameter_name: "my_workflow"}, ) @@ -3929,9 +3930,11 @@ def test_list_inference_pipelines(requests_mock: Mocker) -> None: f"{api_url}/inference_pipelines/list", json={ "status": "success", - "context": {"request_id": "52f5df39-b7de-4a56-8c42-b979d365cfa0", - "pipeline_id": None}, - "pipelines": ["acd62146-edca-4253-8eeb-40c88906cd70"] + "context": { + "request_id": "52f5df39-b7de-4a56-8c42-b979d365cfa0", + "pipeline_id": None, + }, + "pipelines": ["acd62146-edca-4253-8eeb-40c88906cd70"], }, ) @@ -3941,12 +3944,15 @@ def test_list_inference_pipelines(requests_mock: Mocker) -> None: # then assert result == { "status": "success", - 
"context": {"request_id": "52f5df39-b7de-4a56-8c42-b979d365cfa0", - "pipeline_id": None}, - "pipelines": ["acd62146-edca-4253-8eeb-40c88906cd70"] + "context": { + "request_id": "52f5df39-b7de-4a56-8c42-b979d365cfa0", + "pipeline_id": None, + }, + "pipelines": ["acd62146-edca-4253-8eeb-40c88906cd70"], } - assert requests_mock.request_history[0].json() == {"api_key": "my-api-key"}, \ - "Expected payload to contain API key" + assert requests_mock.request_history[0].json() == { + "api_key": "my-api-key" + }, "Expected payload to contain API key" def test_list_inference_pipelines_on_auth_error(requests_mock: Mocker) -> None: @@ -3981,11 +3987,14 @@ def test_get_inference_pipeline_status(requests_mock: Mocker) -> None: assert result == { "status": "success", } - assert requests_mock.request_history[0].json() == {"api_key": "my-api-key"}, \ - "Expected payload to contain API key" + assert requests_mock.request_history[0].json() == { + "api_key": "my-api-key" + }, "Expected payload to contain API key" -def test_get_inference_pipeline_status_when_pipeline_id_empty(requests_mock: Mocker) -> None: +def test_get_inference_pipeline_status_when_pipeline_id_empty( + requests_mock: Mocker, +) -> None: # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url) @@ -3995,7 +4004,9 @@ def test_get_inference_pipeline_status_when_pipeline_id_empty(requests_mock: Moc _ = http_client.get_inference_pipeline_status(pipeline_id="") -def test_get_inference_pipeline_status_when_pipeline_id_not_found(requests_mock: Mocker) -> None: +def test_get_inference_pipeline_status_when_pipeline_id_not_found( + requests_mock: Mocker, +) -> None: # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url) @@ -4027,8 +4038,9 @@ def test_pause_inference_pipeline(requests_mock: Mocker) -> None: assert result == { "status": "success", } - assert requests_mock.request_history[0].json() == {"api_key": 
"my-api-key"}, \ - "Expected payload to contain API key" + assert requests_mock.request_history[0].json() == { + "api_key": "my-api-key" + }, "Expected payload to contain API key" def test_pause_inference_pipeline_when_pipeline_id_empty() -> None: @@ -4041,7 +4053,9 @@ def test_pause_inference_pipeline_when_pipeline_id_empty() -> None: _ = http_client.pause_inference_pipeline(pipeline_id="") -def test_pause_inference_pipeline_when_pipeline_id_not_found(requests_mock: Mocker) -> None: +def test_pause_inference_pipeline_when_pipeline_id_not_found( + requests_mock: Mocker, +) -> None: # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url) @@ -4073,8 +4087,9 @@ def test_resume_inference_pipeline(requests_mock: Mocker) -> None: assert result == { "status": "success", } - assert requests_mock.request_history[0].json() == {"api_key": "my-api-key"}, \ - "Expected payload to contain API key" + assert requests_mock.request_history[0].json() == { + "api_key": "my-api-key" + }, "Expected payload to contain API key" def test_resume_inference_pipeline_when_pipeline_id_empty() -> None: @@ -4087,7 +4102,9 @@ def test_resume_inference_pipeline_when_pipeline_id_empty() -> None: _ = http_client.resume_inference_pipeline(pipeline_id="") -def test_resume_inference_pipeline_when_pipeline_id_not_found(requests_mock: Mocker) -> None: +def test_resume_inference_pipeline_when_pipeline_id_not_found( + requests_mock: Mocker, +) -> None: # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url) @@ -4119,8 +4136,9 @@ def test_terminate_inference_pipeline(requests_mock: Mocker) -> None: assert result == { "status": "success", } - assert requests_mock.request_history[0].json() == {"api_key": "my-api-key"}, \ - "Expected payload to contain API key" + assert requests_mock.request_history[0].json() == { + "api_key": "my-api-key" + }, "Expected payload to contain API key" def 
test_terminate_inference_pipeline_when_pipeline_id_empty() -> None: @@ -4133,7 +4151,9 @@ def test_terminate_inference_pipeline_when_pipeline_id_empty() -> None: _ = http_client.terminate_inference_pipeline(pipeline_id="") -def test_terminate_inference_pipeline_when_pipeline_id_not_found(requests_mock: Mocker) -> None: +def test_terminate_inference_pipeline_when_pipeline_id_not_found( + requests_mock: Mocker, +) -> None: # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url) @@ -4168,8 +4188,10 @@ def test_consume_inference_pipeline_result(requests_mock: Mocker) -> None: assert result == { "status": "success", } - assert requests_mock.request_history[0].json() == {"api_key": "my-api-key", "excluded_fields": ["a"]}, \ - "Expected payload to contain API key" + assert requests_mock.request_history[0].json() == { + "api_key": "my-api-key", + "excluded_fields": ["a"], + }, "Expected payload to contain API key" def test_consume_inference_pipeline_result_when_pipeline_id_empty() -> None: @@ -4182,7 +4204,9 @@ def test_consume_inference_pipeline_result_when_pipeline_id_empty() -> None: _ = http_client.consume_inference_pipeline_result(pipeline_id="") -def test_consume_inference_pipeline_result_when_pipeline_id_not_found(requests_mock: Mocker) -> None: +def test_consume_inference_pipeline_result_when_pipeline_id_not_found( + requests_mock: Mocker, +) -> None: # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url) @@ -4196,17 +4220,23 @@ def test_consume_inference_pipeline_result_when_pipeline_id_not_found(requests_m _ = http_client.consume_inference_pipeline_result(pipeline_id="my-pipeline") -def test_start_inference_pipeline_with_workflow_when_configuration_does_not_specify_workflow() -> None: +def test_start_inference_pipeline_with_workflow_when_configuration_does_not_specify_workflow() -> ( + None +): # given api_url = "http://some.com" http_client = 
InferenceHTTPClient(api_key="my-api-key", api_url=api_url) # when with pytest.raises(InvalidParameterError): - http_client.start_inference_pipeline_with_workflow(video_reference="rtsp://some/stream") + http_client.start_inference_pipeline_with_workflow( + video_reference="rtsp://some/stream" + ) -def test_start_inference_pipeline_with_workflow_when_configuration_does_over_specify_workflow() -> None: +def test_start_inference_pipeline_with_workflow_when_configuration_does_over_specify_workflow() -> ( + None +): # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url) @@ -4221,7 +4251,9 @@ def test_start_inference_pipeline_with_workflow_when_configuration_does_over_spe ) -def test_start_inference_pipeline_with_workflow_when_configuration_is_valid(requests_mock: Mocker) -> None: +def test_start_inference_pipeline_with_workflow_when_configuration_is_valid( + requests_mock: Mocker, +) -> None: # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url) @@ -4270,5 +4302,3 @@ def test_start_inference_pipeline_with_workflow_when_configuration_is_valid(requ "results_buffer_size": 64, }, } - - diff --git a/tests/inference_sdk/unit_tests/http/utils/test_loaders.py b/tests/inference_sdk/unit_tests/http/utils/test_loaders.py index 0004519611..af325fc663 100644 --- a/tests/inference_sdk/unit_tests/http/utils/test_loaders.py +++ b/tests/inference_sdk/unit_tests/http/utils/test_loaders.py @@ -658,9 +658,7 @@ def test_load_nested_batches_of_inference_input_when_single_element_is_given( load_static_inference_input_mock: MagicMock, ) -> None: # given - load_static_inference_input_mock.side_effect = [ - ["image_1"] - ] + load_static_inference_input_mock.side_effect = [["image_1"]] # when result = load_nested_batches_of_inference_input( @@ -668,7 +666,9 @@ def test_load_nested_batches_of_inference_input_when_single_element_is_given( ) # then - assert result == "image_1", "Expected 
direct result from load_static_inference_input()" + assert ( + result == "image_1" + ), "Expected direct result from load_static_inference_input()" @mock.patch.object(loaders, "load_static_inference_input") @@ -679,7 +679,7 @@ def test_load_nested_batches_of_inference_input_when_1d_batch_is_given( load_static_inference_input_mock.side_effect = [ ["image_1"], ["image_2"], - ["image_3"] + ["image_3"], ] # when @@ -688,7 +688,11 @@ def test_load_nested_batches_of_inference_input_when_1d_batch_is_given( ) # then - assert result == ["image_1", "image_2", "image_3"], "Expected direct result from load_static_inference_input()" + assert result == [ + "image_1", + "image_2", + "image_3", + ], "Expected direct result from load_static_inference_input()" @mock.patch.object(loaders, "load_static_inference_input") diff --git a/tests/inference_sdk/unit_tests/http/utils/test_requests.py b/tests/inference_sdk/unit_tests/http/utils/test_requests.py index ddfe771f7e..13931821cd 100644 --- a/tests/inference_sdk/unit_tests/http/utils/test_requests.py +++ b/tests/inference_sdk/unit_tests/http/utils/test_requests.py @@ -160,7 +160,9 @@ def test_inject_nested_batches_of_images_into_payload_when_single_image_given() assert result == {"image": {"type": "base64", "value": "img1"}} -def test_inject_nested_batches_of_images_into_payload_when_1d_batch_of_images_given() -> None: +def test_inject_nested_batches_of_images_into_payload_when_1d_batch_of_images_given() -> ( + None +): # when result = inject_nested_batches_of_images_into_payload( payload={}, @@ -176,7 +178,9 @@ def test_inject_nested_batches_of_images_into_payload_when_1d_batch_of_images_gi } -def test_inject_nested_batches_of_images_into_payload_when_nested_batch_of_images_given() -> None: +def test_inject_nested_batches_of_images_into_payload_when_nested_batch_of_images_given() -> ( + None +): # when result = inject_nested_batches_of_images_into_payload( payload={}, diff --git 
a/tests/workflows/integration_tests/compilation/stub_plugins/plugin_with_dimensionality_manipulation_blocks/__init__.py b/tests/workflows/integration_tests/compilation/stub_plugins/plugin_with_dimensionality_manipulation_blocks/__init__.py index f351ca713a..612d63da7b 100644 --- a/tests/workflows/integration_tests/compilation/stub_plugins/plugin_with_dimensionality_manipulation_blocks/__init__.py +++ b/tests/workflows/integration_tests/compilation/stub_plugins/plugin_with_dimensionality_manipulation_blocks/__init__.py @@ -440,7 +440,7 @@ def get_manifest(cls) -> Type[WorkflowBlockManifest]: return DecreasingDimensionalityManifest def run(self, images: Batch[WorkflowImageData]) -> BlockResult: - pass + return {"output": len(images)} def load_blocks() -> List[Type[WorkflowBlock]]: diff --git a/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_dimension_of_references.py b/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_dimension_of_references.py index f4ecf6df4b..5cf0106ea1 100644 --- a/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_dimension_of_references.py +++ b/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_dimension_of_references.py @@ -221,8 +221,9 @@ def test_compilation_of_workflow_where_step_attempts_decreasing_dimensionality_t } # when - with pytest.raises(StepOutputLineageError): - _ = compile_workflow( - workflow_definition=WORKFLOW_ATTEMPTING_TO_REDUCE_DIM_TO_ZERO, - init_parameters=workflow_init_parameters, - ) + _ = compile_workflow( + workflow_definition=WORKFLOW_ATTEMPTING_TO_REDUCE_DIM_TO_ZERO, + init_parameters=workflow_init_parameters, + ) + + # then - no error diff --git a/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_plugin.py b/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_plugin.py index 
f0cc8ec893..2f69278dd3 100644 --- a/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_plugin.py +++ b/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_plugin.py @@ -145,7 +145,10 @@ def test_compilation_of_workflow_where_block_is_not_simd_but_defines_output_offs ) # then - assert compiled_workflow.execution_graph.graph[TOP_LEVEL_LINEAGE_KEY] == "$steps.problematic_dimensions" + assert ( + compiled_workflow.execution_graph.graph[TOP_LEVEL_LINEAGE_KEY] + == "$steps.problematic_dimensions" + ) WORKFLOW_WITH_INVALID_BLOCK_DECLARING_DIMENSIONALITY_REFERENCE_PROPERTY_AS_NON_BATCH = { diff --git a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py index d3e3adf2a4..5921efedf1 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py @@ -1,14 +1,26 @@ import json -from typing import Literal, List, Optional, Type, Any, Tuple +from typing import Any, List, Literal, Optional, Tuple, Type from uuid import uuid4 import numpy as np from pydantic import Field -from inference.core.workflows.execution_engine.entities.base import OutputDefinition, WorkflowImageData, \ - ImageParentMetadata, Batch -from inference.core.workflows.execution_engine.entities.types import IMAGE_KIND, Selector, STRING_KIND -from inference.core.workflows.prototypes.block import WorkflowBlockManifest, WorkflowBlock, BlockResult +from inference.core.workflows.execution_engine.entities.base import ( + Batch, + ImageParentMetadata, + OutputDefinition, + WorkflowImageData, +) +from inference.core.workflows.execution_engine.entities.types import ( + IMAGE_KIND, + STRING_KIND, + Selector, +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + 
WorkflowBlock, + WorkflowBlockManifest, +) class ImageProducerBlockManifest(WorkflowBlockManifest): @@ -33,7 +45,7 @@ def get_manifest(cls) -> Type[WorkflowBlockManifest]: def run(self, shape: Tuple[int, int, int]) -> BlockResult: image = WorkflowImageData( parent_metadata=ImageParentMetadata(parent_id=f"image_producer.{uuid4()}"), - numpy_image=np.zeros(shape, dtype=np.uint8) + numpy_image=np.zeros(shape, dtype=np.uint8), ) return {"image": image} @@ -115,10 +127,17 @@ class MultiSIMDImageConsumer(WorkflowBlock): def get_manifest(cls) -> Type[WorkflowBlockManifest]: return MultiSIMDImageConsumerManifest - def run(self, images_x: Batch[WorkflowImageData], images_y: Batch[WorkflowImageData]) -> BlockResult: + def run( + self, images_x: Batch[WorkflowImageData], images_y: Batch[WorkflowImageData] + ) -> BlockResult: results = [] for image_x, image_y in zip(images_x, images_y): - results.append({"metadata": json.dumps(image_x.numpy_image.shape) + json.dumps(image_y.numpy_image.shape)}) + results.append( + { + "metadata": json.dumps(image_x.numpy_image.shape) + + json.dumps(image_y.numpy_image.shape) + } + ) return results @@ -142,8 +161,13 @@ class MultiImageConsumer(WorkflowBlock): def get_manifest(cls) -> Type[WorkflowBlockManifest]: return MultiImageConsumerManifest - def run(self, images_x: WorkflowImageData, images_y: WorkflowImageData) -> BlockResult: - return {"shapes": json.dumps(images_x.numpy_image.shape) + json.dumps(images_y.numpy_image.shape)} + def run( + self, images_x: WorkflowImageData, images_y: WorkflowImageData + ) -> BlockResult: + return { + "shapes": json.dumps(images_x.numpy_image.shape) + + json.dumps(images_y.numpy_image.shape) + } class MultiImageConsumerRaisingDimManifest(WorkflowBlockManifest): @@ -170,8 +194,15 @@ class MultiImageConsumerRaisingDim(WorkflowBlock): def get_manifest(cls) -> Type[WorkflowBlockManifest]: return MultiImageConsumerRaisingDimManifest - def run(self, images_x: WorkflowImageData, images_y: WorkflowImageData) -> 
BlockResult: - return [{"shapes": json.dumps(images_x.numpy_image.shape) + json.dumps(images_y.numpy_image.shape)}] + def run( + self, images_x: WorkflowImageData, images_y: WorkflowImageData + ) -> BlockResult: + return [ + { + "shapes": json.dumps(images_x.numpy_image.shape) + + json.dumps(images_y.numpy_image.shape) + } + ] class MultiSIMDImageConsumerRaisingDimManifest(WorkflowBlockManifest): @@ -198,15 +229,24 @@ class MultiSIMDImageConsumerRaisingDim(WorkflowBlock): def get_manifest(cls) -> Type[WorkflowBlockManifest]: return MultiSIMDImageConsumerRaisingDimManifest - def run(self, images_x: Batch[WorkflowImageData], images_y: Batch[WorkflowImageData]) -> BlockResult: + def run( + self, images_x: Batch[WorkflowImageData], images_y: Batch[WorkflowImageData] + ) -> BlockResult: results = [] for image_x, image_y in zip(images_x, images_y): - results.append([{"shapes": json.dumps(image_x.numpy_image.shape) + json.dumps(image_y.numpy_image.shape)}]) + results.append( + [ + { + "shapes": json.dumps(image_x.numpy_image.shape) + + json.dumps(image_y.numpy_image.shape) + } + ] + ) return results -class MultiSIMDImageConsumerDecreasingDimManifest(WorkflowBlockManifest): - type: Literal["MultiSIMDImageConsumerDecreasingDim"] +class MultiNonSIMDImageConsumerDecreasingDimManifest(WorkflowBlockManifest): + type: Literal["MultiNonSIMDImageConsumerDecreasingDim"] images_x: Selector(kind=[IMAGE_KIND]) images_y: Selector(kind=[IMAGE_KIND]) @@ -214,6 +254,10 @@ class MultiSIMDImageConsumerDecreasingDimManifest(WorkflowBlockManifest): def describe_outputs(cls) -> List[OutputDefinition]: return [OutputDefinition(name="shapes", kind=[STRING_KIND])] + @classmethod + def get_parameters_enforcing_auto_batch_casting(cls) -> List[str]: + return ["images_x", "images_y"] + @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: return ">=1.3.0,<2.0.0" @@ -223,16 +267,22 @@ def get_output_dimensionality_offset(cls) -> int: return -1 -class 
MultiSIMDImageConsumerDecreasingDim(WorkflowBlock): +class MultiNonSIMDImageConsumerDecreasingDim(WorkflowBlock): @classmethod def get_manifest(cls) -> Type[WorkflowBlockManifest]: - return MultiSIMDImageConsumerDecreasingDimManifest + return MultiNonSIMDImageConsumerDecreasingDimManifest - def run(self, images_x: Batch[WorkflowImageData], images_y: Batch[WorkflowImageData]) -> BlockResult: + def run( + self, images_x: Batch[WorkflowImageData], images_y: Batch[WorkflowImageData] + ) -> BlockResult: + print("images_x", images_x, "images_y", images_y) results = [] for image_x, image_y in zip(images_x, images_y): - results.append(json.dumps(image_x.numpy_image.shape) + json.dumps(image_y.numpy_image.shape)) + results.append( + json.dumps(image_x.numpy_image.shape) + + json.dumps(image_y.numpy_image.shape) + ) return {"shapes": "\n".join(results)} @@ -298,5 +348,5 @@ def load_blocks() -> List[Type[WorkflowBlock]]: MultiSIMDImageConsumerRaisingDim, IdentityBlock, IdentitySIMDBlock, - MultiSIMDImageConsumerDecreasingDim + MultiNonSIMDImageConsumerDecreasingDim, ] diff --git a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py index 2a88894680..2caa7ded22 100644 --- a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py +++ b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py @@ -22,8 +22,8 @@ { "type": "ImageConsumer", "name": "image_consumer", - "images": "$steps.image_producer.image" - } + "images": "$steps.image_producer.image", + }, ], "outputs": [ { @@ -56,9 +56,7 @@ def test_workflow_producing_image_and_consuming_it_in_block_accepting_single_bat ) # when - result = execution_engine.run( - runtime_parameters={} - ) + result = execution_engine.run(runtime_parameters={}) # then assert result == 
[{"shapes": "[192, 168, 3]"}] @@ -75,8 +73,8 @@ def test_workflow_producing_image_and_consuming_it_in_block_accepting_single_bat { "type": "ImageConsumerNonSIMD", "name": "image_consumer", - "images": "$steps.image_producer.image" - } + "images": "$steps.image_producer.image", + }, ], "outputs": [ { @@ -109,9 +107,7 @@ def test_workflow_producing_image_and_consuming_it_in_block_accepting_single_non ) # when - result = execution_engine.run( - runtime_parameters={} - ) + result = execution_engine.run(runtime_parameters={}) # then assert result == [{"shapes": "[192, 168, 3]"}] @@ -123,11 +119,7 @@ def test_workflow_producing_image_and_consuming_it_in_block_accepting_single_non {"type": "WorkflowImage", "name": "image"}, ], "steps": [ - { - "type": "ImageConsumer", - "name": "image_consumer", - "images": "$inputs.image" - } + {"type": "ImageConsumer", "name": "image_consumer", "images": "$inputs.image"} ], "outputs": [ { @@ -161,9 +153,7 @@ def test_workflow_consuming_input_image_in_block_accepting_single_non_simd_input image = np.zeros((240, 230, 3), dtype=np.uint8) # when - result = execution_engine.run( - runtime_parameters={"image": image} - ) + result = execution_engine.run(runtime_parameters={"image": image}) # then assert result == [{"shapes": "[240, 230, 3]"}] @@ -177,17 +167,13 @@ def test_workflow_consuming_input_image_in_block_accepting_single_non_simd_input "type": "ImageProducer", "name": "image_producer_x", }, - { - "type": "ImageProducer", - "name": "image_producer_y", - "shape": (240, 230, 3) - }, + {"type": "ImageProducer", "name": "image_producer_y", "shape": (240, 230, 3)}, { "type": "MultiSIMDImageConsumer", "name": "image_consumer", "images_x": "$steps.image_producer_x.image", "images_y": "$steps.image_producer_y.image", - } + }, ], "outputs": [ { @@ -228,7 +214,9 @@ def test_workflow_with_simd_image_consumers_consuming_images_generated_by_image_ WORKFLOW_IMAGE_PRODUCER_AND_INPUT_IMAGES_COMBINED_WITH_MULTIPLE_IMAGES_SIMD_CONSUMER = { "version": 
"1.1", - "inputs": [{"type": "WorkflowImage", "name": "image"},], + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + ], "steps": [ { "type": "ImageProducer", @@ -239,7 +227,7 @@ def test_workflow_with_simd_image_consumers_consuming_images_generated_by_image_ "name": "image_consumer", "images_x": "$steps.image_producer_x.image", "images_y": "$inputs.image", - } + }, ], "outputs": [ { @@ -273,7 +261,7 @@ def test_workflow_with_simd_image_consumers_consuming_images_generated_by_image_ input_images = [ np.zeros((192, 192, 3), dtype=np.uint8), np.zeros((200, 192, 3), dtype=np.uint8), - np.zeros((300, 192, 3), dtype=np.uint8) + np.zeros((300, 192, 3), dtype=np.uint8), ] # when result = execution_engine.run(runtime_parameters={"image": input_images}) @@ -282,13 +270,15 @@ def test_workflow_with_simd_image_consumers_consuming_images_generated_by_image_ assert result == [ {"metadata": "[192, 168, 3][192, 192, 3]"}, {"metadata": "[192, 168, 3][200, 192, 3]"}, - {"metadata": "[192, 168, 3][300, 192, 3]"} + {"metadata": "[192, 168, 3][300, 192, 3]"}, ] WORKFLOW_IMAGE_PRODUCER_AND_STEP_OUTPUT_IMAGES_COMBINED_WITH_MULTIPLE_IMAGES_SIMD_CONSUMER = { "version": "1.1", - "inputs": [{"type": "WorkflowImage", "name": "image"},], + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + ], "steps": [ { "type": "ImageProducer", @@ -304,7 +294,7 @@ def test_workflow_with_simd_image_consumers_consuming_images_generated_by_image_ "name": "image_consumer", "images_x": "$steps.image_producer_x.image", "images_y": "$steps.identity_simd.x", - } + }, ], "outputs": [ { @@ -338,7 +328,7 @@ def test_workflow_with_simd_image_consumers_consuming_images_generated_by_image_ input_images = [ np.zeros((192, 192, 3), dtype=np.uint8), np.zeros((200, 192, 3), dtype=np.uint8), - np.zeros((300, 192, 3), dtype=np.uint8) + np.zeros((300, 192, 3), dtype=np.uint8), ] # when result = execution_engine.run(runtime_parameters={"image": input_images}) @@ -347,7 +337,7 @@ def 
test_workflow_with_simd_image_consumers_consuming_images_generated_by_image_ assert result == [ {"metadata": "[192, 168, 3][192, 192, 3]"}, {"metadata": "[192, 168, 3][200, 192, 3]"}, - {"metadata": "[192, 168, 3][300, 192, 3]"} + {"metadata": "[192, 168, 3][300, 192, 3]"}, ] @@ -373,8 +363,8 @@ def test_workflow_with_simd_image_consumers_consuming_images_generated_by_image_ "type": "MultiImageConsumer", "name": "image_consumer", "images_x": "$steps.identity_simd.x", - "images_y": "$steps.image_producer_y.image" - } + "images_y": "$steps.image_producer_y.image", + }, ], "outputs": [ { @@ -407,9 +397,7 @@ def test_workflow_with_multiple_scalar_producers_feeding_data_into_scalar_consum ) # when - result = execution_engine.run( - runtime_parameters={} - ) + result = execution_engine.run(runtime_parameters={}) # then assert result == [{"shapes": "[192, 168, 3][220, 230, 3]"}] @@ -428,8 +416,8 @@ def test_workflow_with_multiple_scalar_producers_feeding_data_into_scalar_consum "type": "MultiImageConsumer", "name": "image_consumer", "images_x": "$inputs.image", - "images_y": "$steps.image_producer_y.image" - } + "images_y": "$steps.image_producer_y.image", + }, ], "outputs": [ { @@ -491,8 +479,8 @@ def test_workflow_with_scalar_producer_and_batch_input_feeding_data_into_scalar_ "type": "MultiImageConsumerRaisingDim", "name": "image_consumer", "images_x": "$steps.identity_simd.x", - "images_y": "$steps.image_producer_y.image" - } + "images_y": "$steps.image_producer_y.image", + }, ], "outputs": [ { @@ -527,9 +515,7 @@ def test_workflow_with_multiple_scalar_producers_feeding_non_simd_consumer_raisi ) # when - result = execution_engine.run( - runtime_parameters={} - ) + result = execution_engine.run(runtime_parameters={}) # then assert result == [{"shapes": "[192, 168, 3][220, 230, 3]"}] @@ -552,8 +538,8 @@ def test_workflow_with_multiple_scalar_producers_feeding_non_simd_consumer_raisi "type": "MultiImageConsumerRaisingDim", "name": "image_consumer", "images_x": 
"$steps.identity_simd.x", - "images_y": "$inputs.image" - } + "images_y": "$inputs.image", + }, ], "outputs": [ { @@ -590,9 +576,7 @@ def test_workflow_with_scalar_producer_and_batch_input_feeding_non_simd_consumer image_2 = np.zeros((300, 100, 3), dtype=np.uint8) # when - result = execution_engine.run( - runtime_parameters={"image": [image_1, image_2]} - ) + result = execution_engine.run(runtime_parameters={"image": [image_1, image_2]}) # then assert result == [ @@ -620,11 +604,11 @@ def test_workflow_with_scalar_producer_and_batch_input_feeding_non_simd_consumer "shape": (220, 230, 3), }, { - "type": "MultiSIMDImageConsumerDecreasingDim", + "type": "MultiNonSIMDImageConsumerDecreasingDim", "name": "image_consumer", "images_x": "$steps.identity_simd.x", - "images_y": "$steps.image_producer_y.image" - } + "images_y": "$steps.image_producer_y.image", + }, ], "outputs": [ { @@ -650,11 +634,77 @@ def test_workflow_with_multiple_scalar_producers_feeding_non_simd_consumer_decre "workflows_core.api_key": None, "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) # then - with pytest.raises(StepInputDimensionalityError): - _ = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_NON_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS, - init_parameters=workflow_init_parameters, - max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, - ) + assert result[0]["shapes"] == "[192, 168, 3][220, 230, 3]" + + +WORKFLOW_WITH_NON_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT = { + "version": "1.1", + "inputs": [{"type": "WorkflowImage", "name": "image"}], + "steps": [ + { + "type": "ImageProducer", + "name": 
"image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "MultiNonSIMDImageConsumerDecreasingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$inputs.image", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_scalar_producer_and_batch_input_feeding_non_simd_consumer_decreasing_dim( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + image_1 = np.zeros((200, 100, 3), dtype=np.uint8) + image_2 = np.zeros((300, 100, 3), dtype=np.uint8) + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # then + result = execution_engine.run(runtime_parameters={"image": [image_1, image_2]}) + + # then + assert result == [ + {"shapes": "[192, 168, 3][200, 100, 3]\n[192, 168, 3][300, 100, 3]"} + ] diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py index af22e6661d..388631197c 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py @@ -1336,11 +1336,7 @@ def 
test_workflow_when_non_batch_oriented_step_feeds_compound_strictly_batch_ori ) # when - results = execution_engine.run( - runtime_parameters={ - "non_batch_parameter": "some" - } - ) + results = execution_engine.run(runtime_parameters={"non_batch_parameter": "some"}) # then assert results == [{"result": 0.4}] @@ -1760,7 +1756,7 @@ def test_workflow_when_non_batch_oriented_input_feeds_compound_strictly_batch_or result = execution_engine.run(runtime_parameters={"data": "some"}) # then - assert result == [{'result': 0.4}] + assert result == [{"result": 0.4}] WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_NON_BATCH_ORIENTED_STEP = { diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_perception_encoder.py b/tests/workflows/integration_tests/execution/test_workflow_with_perception_encoder.py index 0235da4fc1..fd11fbf17c 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_perception_encoder.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_perception_encoder.py @@ -48,6 +48,7 @@ ], } + @pytest.mark.skip(reason="Known problem of race condition in execution engine") def test_perception_encoder_embedding_model( model_manager: ModelManager, @@ -76,6 +77,7 @@ def test_perception_encoder_embedding_model( assert -1.0 <= result[0]["similarity"] <= 1.0 assert len(result[0]["image_embeddings"]) >= 1024 + PERCEPTION_ENCODER_TEXT_WORKFLOW = { "version": "1.0", "inputs": [ @@ -99,6 +101,7 @@ def test_perception_encoder_embedding_model( ], } + @pytest.mark.skip(reason="Known problem of race condition in execution engine") def test_perception_encoder_text_embedding_model( model_manager: ModelManager, diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_property_extraction.py b/tests/workflows/integration_tests/execution/test_workflow_with_property_extraction.py index 2f719e5a86..9e5781452b 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_property_extraction.py 
+++ b/tests/workflows/integration_tests/execution/test_workflow_with_property_extraction.py @@ -4,6 +4,7 @@ import cv2 as cv import numpy as np import pytest +import supervision as sv from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS from inference.core.interfaces.camera.video_source import VideoSource @@ -12,7 +13,6 @@ from inference.core.interfaces.stream.watchdog import BasePipelineWatchDog from inference.core.managers.base import ModelManager from inference.core.workflows.core_steps.common.entities import StepExecutionMode -from inference.core.workflows.errors import StepOutputLineageError from inference.core.workflows.execution_engine.core import ExecutionEngine from tests.workflows.integration_tests.execution.workflows_gallery_collector.decorators import ( add_to_workflows_gallery, @@ -724,14 +724,29 @@ def test_workflow_when_there_is_faulty_application_of_aggregation_step_at_batch_ "workflows_core.api_key": roboflow_api_key, "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_INVALID_AGGREGATION, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) # when - with pytest.raises(StepOutputLineageError): - _ = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_INVALID_AGGREGATION, - init_parameters=workflow_init_parameters, - max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, - ) + result = execution_engine.run( + runtime_parameters={ + "image": [ + np.zeros((192, 168, 3), dtype=np.uint8), + np.zeros((200, 168, 3), dtype=np.uint8), + ] + } + ) + + # then + assert len(result) == 1, "Expected result to collapse" + assert ( + len(result[0]["result"]) == 2 + ), "Expected both predictions to be placed in the list" + assert isinstance(result[0]["result"][0], sv.Detections) + assert isinstance(result[0]["result"][1], sv.Detections) WORKFLOW_WITH_ASPECT_RATIO_EXTRACTION = { diff --git 
a/tests/workflows/unit_tests/core_steps/fusion/test_detections_classes_replacement.py b/tests/workflows/unit_tests/core_steps/fusion/test_detections_classes_replacement.py index ca223e4b12..088488278c 100644 --- a/tests/workflows/unit_tests/core_steps/fusion/test_detections_classes_replacement.py +++ b/tests/workflows/unit_tests/core_steps/fusion/test_detections_classes_replacement.py @@ -386,9 +386,15 @@ def test_classes_replacement_when_empty_classification_predictions_fallback_clas len(result["predictions"]) == 2 ), "Expected sv.Detections.empty(), as empty classification was passed" detections = result["predictions"] - assert detections.confidence[1] == 0, "Fallback class confidence expected to be set to 0" - assert detections.class_id[1] == 123, "class id expected to be set to value passed with fallback_class_id parameter" - assert detections.data["class_name"][1] == "unknown", "class name expected to be set to value passed with fallback_class_name parameter" + assert ( + detections.confidence[1] == 0 + ), "Fallback class confidence expected to be set to 0" + assert ( + detections.class_id[1] == 123 + ), "class id expected to be set to value passed with fallback_class_id parameter" + assert ( + detections.data["class_name"][1] == "unknown" + ), "class name expected to be set to value passed with fallback_class_name parameter" def test_extract_leading_class_from_prediction_when_prediction_is_multi_label() -> None: diff --git a/tests/workflows/unit_tests/core_steps/models/foundation/test_perception_encoder.py b/tests/workflows/unit_tests/core_steps/models/foundation/test_perception_encoder.py index 95dbb338e8..45b12407fd 100644 --- a/tests/workflows/unit_tests/core_steps/models/foundation/test_perception_encoder.py +++ b/tests/workflows/unit_tests/core_steps/models/foundation/test_perception_encoder.py @@ -18,9 +18,7 @@ @pytest.fixture def mock_model_manager(): mock = MagicMock() - mock.infer_from_request_sync.return_value = MagicMock( - embeddings=[[0.1, 0.2, 
0.3]] - ) + mock.infer_from_request_sync.return_value = MagicMock(embeddings=[[0.1, 0.2, 0.3]]) return mock @@ -109,7 +107,9 @@ def test_run_remotely_with_text(mock_client_cls, mock_model_manager): @patch( "inference.core.workflows.core_steps.models.foundation.perception_encoder.v1.InferenceHTTPClient" ) -def test_run_remotely_with_image(mock_client_cls, mock_model_manager, mock_workflow_image_data): +def test_run_remotely_with_image( + mock_client_cls, mock_model_manager, mock_workflow_image_data +): mock_client = MagicMock() mock_client.get_perception_encoder_image_embeddings.return_value = { "embeddings": [[0.1, 0.2, 0.3]] @@ -126,4 +126,3 @@ def test_run_remotely_with_image(mock_client_cls, mock_model_manager, mock_workf assert result["embedding"] == [0.1, 0.2, 0.3] mock_client.get_perception_encoder_image_embeddings.assert_called_once() - diff --git a/tests/workflows/unit_tests/core_steps/sinks/test_onvif_movement.py b/tests/workflows/unit_tests/core_steps/sinks/test_onvif_movement.py index ecb316c4c6..b4db7d460e 100644 --- a/tests/workflows/unit_tests/core_steps/sinks/test_onvif_movement.py +++ b/tests/workflows/unit_tests/core_steps/sinks/test_onvif_movement.py @@ -54,5 +54,5 @@ def test_manifest_parsing_when_the_input_is_valid() -> None: camera_update_rate_limit=500, camera_port=1981, flip_x_movement=True, - flip_y_movement=True + flip_y_movement=True, ) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_icon.py b/tests/workflows/unit_tests/core_steps/visualizations/test_icon.py index ca6c4e0752..495c19611d 100644 --- a/tests/workflows/unit_tests/core_steps/visualizations/test_icon.py +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_icon.py @@ -115,7 +115,7 @@ def test_icon_validation_when_dynamic_mode_with_default_position() -> None: # when result = IconManifest.model_validate(data) - + # then assert result.position == "TOP_CENTER" # Check default value is used @@ -142,13 +142,13 @@ def 
test_icon_validation_when_invalid_image_is_given() -> None: def test_icon_visualization_block_static_mode(): # given block = IconVisualizationBlockV1() - + # Create test images test_image = WorkflowImageData( parent_metadata=ImageParentMetadata(parent_id="test"), numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), ) - + # Create test icon (red square) test_icon_np = np.zeros((32, 32, 3), dtype=np.uint8) test_icon_np[:, :, 2] = 255 # Make it red @@ -185,13 +185,13 @@ def test_icon_visualization_block_static_mode(): def test_icon_visualization_block_dynamic_mode(): # given block = IconVisualizationBlockV1() - + # Create test images test_image = WorkflowImageData( parent_metadata=ImageParentMetadata(parent_id="test"), numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), ) - + # Create test icon (blue square) test_icon_np = np.zeros((32, 32, 3), dtype=np.uint8) test_icon_np[:, :, 0] = 255 # Make it blue @@ -234,13 +234,13 @@ def test_icon_visualization_block_dynamic_mode(): def test_icon_visualization_block_static_mode_negative_positioning(): # given block = IconVisualizationBlockV1() - + # Create test images test_image = WorkflowImageData( parent_metadata=ImageParentMetadata(parent_id="test"), numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), ) - + # Create test icon (green square) test_icon_np = np.zeros((50, 50, 3), dtype=np.uint8) test_icon_np[:, :, 1] = 255 # Make it green @@ -287,7 +287,7 @@ def test_icon_validation_when_static_mode_with_defaults() -> None: # when result = IconManifest.model_validate(data) - + # then assert result.x_position == 10 assert result.y_position == 10 diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_icon_alpha.py b/tests/workflows/unit_tests/core_steps/visualizations/test_icon_alpha.py index 2747f05f4b..c78c4355de 100644 --- a/tests/workflows/unit_tests/core_steps/visualizations/test_icon_alpha.py +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_icon_alpha.py @@ -1,8 +1,10 @@ +import base64 
+ +import cv2 import numpy as np import pytest import supervision as sv -import base64 -import cv2 + from inference.core.workflows.core_steps.visualizations.icon.v1 import ( IconVisualizationBlockV1, ) @@ -16,33 +18,33 @@ def test_icon_with_alpha_from_workflow_input_numpy(): """Test that alpha channel is preserved when icon comes from workflow input as numpy.""" # given block = IconVisualizationBlockV1() - + # Create a test background image (white) bg_image = np.ones((500, 500, 3), dtype=np.uint8) * 255 test_image = WorkflowImageData( parent_metadata=ImageParentMetadata(parent_id="background"), numpy_image=bg_image, ) - + # Create an icon with alpha channel (red circle with transparent background) icon_with_alpha = np.zeros((100, 100, 4), dtype=np.uint8) center = (50, 50) for y in range(100): for x in range(100): - dist = np.sqrt((x - center[0])**2 + (y - center[1])**2) + dist = np.sqrt((x - center[0]) ** 2 + (y - center[1]) ** 2) if dist <= 40: # Red circle with full opacity icon_with_alpha[y, x] = [0, 0, 255, 255] # BGRA else: # Transparent background icon_with_alpha[y, x] = [0, 0, 0, 0] - + # Create WorkflowImageData from numpy array (simulating workflow input) test_icon = WorkflowImageData( parent_metadata=ImageParentMetadata(parent_id="icon_input"), numpy_image=icon_with_alpha, # Has 4 channels with alpha ) - + # Run the block in static mode output = block.run( image=test_image, @@ -56,9 +58,9 @@ def test_icon_with_alpha_from_workflow_input_numpy(): x_position=200, y_position=200, ) - - result_image = output['image'].numpy_image - + + result_image = output["image"].numpy_image + # Check that transparency was preserved # The corners of where the icon was placed should still be white corner_positions = [ @@ -67,58 +69,60 @@ def test_icon_with_alpha_from_workflow_input_numpy(): (200, 299), # Bottom-left corner (299, 299), # Bottom-right corner ] - + for x, y in corner_positions: pixel_color = result_image[y, x] # Should be white or very close to white - assert 
np.all(pixel_color > 250), \ - f"Expected white background at ({x},{y}) due to transparency, got {pixel_color}" - + assert np.all( + pixel_color > 250 + ), f"Expected white background at ({x},{y}) due to transparency, got {pixel_color}" + # Check that the red circle is visible in the center center_x, center_y = 250, 250 center_color = result_image[center_y, center_x] - + # Should be red (high red channel, low blue/green) assert center_color[2] > 200, f"Red channel too low at center: {center_color}" - assert center_color[0] < 100 and center_color[1] < 100, \ - f"Blue/Green should be low at center: {center_color}" + assert ( + center_color[0] < 100 and center_color[1] < 100 + ), f"Blue/Green should be low at center: {center_color}" def test_icon_with_alpha_from_base64_input(): """Test that alpha channel is preserved when icon comes as base64 (API input scenario).""" # given block = IconVisualizationBlockV1() - + # Create a test background image (white) bg_image = np.ones((500, 500, 3), dtype=np.uint8) * 255 test_image = WorkflowImageData( parent_metadata=ImageParentMetadata(parent_id="background"), numpy_image=bg_image, ) - + # Create an icon with alpha channel icon_with_alpha = np.zeros((100, 100, 4), dtype=np.uint8) center = (50, 50) for y in range(100): for x in range(100): - dist = np.sqrt((x - center[0])**2 + (y - center[1])**2) + dist = np.sqrt((x - center[0]) ** 2 + (y - center[1]) ** 2) if dist <= 40: # Blue circle with full opacity icon_with_alpha[y, x] = [255, 0, 0, 255] # BGRA else: # Transparent background icon_with_alpha[y, x] = [0, 0, 0, 0] - + # Encode as PNG base64 (preserves alpha) - _, png_buffer = cv2.imencode('.png', icon_with_alpha) - png_base64 = base64.b64encode(png_buffer).decode('ascii') - + _, png_buffer = cv2.imencode(".png", icon_with_alpha) + png_base64 = base64.b64encode(png_buffer).decode("ascii") + # Create WorkflowImageData from base64 (simulating API input) test_icon = WorkflowImageData( 
parent_metadata=ImageParentMetadata(parent_id="icon_base64"), base64_image=png_base64, ) - + # Run the block in static mode output = block.run( image=test_image, @@ -132,28 +136,30 @@ def test_icon_with_alpha_from_base64_input(): x_position=200, y_position=200, ) - - result_image = output['image'].numpy_image - + + result_image = output["image"].numpy_image + # Check that transparency was preserved corner_positions = [ (200, 200), # Top-left corner - (299, 200), # Top-right corner + (299, 200), # Top-right corner (200, 299), # Bottom-left corner (299, 299), # Bottom-right corner ] - + for x, y in corner_positions: pixel_color = result_image[y, x] # Should be white or very close to white (allowing small variations) - assert np.all(pixel_color > 250), \ - f"Expected white background at ({x},{y}) due to transparency, got {pixel_color}" - + assert np.all( + pixel_color > 250 + ), f"Expected white background at ({x},{y}) due to transparency, got {pixel_color}" + # Check that the blue circle is visible in the center center_x, center_y = 250, 250 center_color = result_image[center_y, center_x] - + # Should be blue (high blue channel, low red/green) assert center_color[0] > 200, f"Blue channel too low at center: {center_color}" - assert center_color[1] < 100 and center_color[2] < 100, \ - f"Red/Green should be low at center: {center_color}" + assert ( + center_color[1] < 100 and center_color[2] < 100 + ), f"Red/Green should be low at center: {center_color}" diff --git a/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py b/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py index 08bce5656c..d88c456f15 100644 --- a/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py +++ b/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py @@ -8,7 +8,10 @@ from inference.core.workflows.core_steps.loader import KINDS_SERIALIZERS from inference.core.workflows.errors import 
AssumptionError, ExecutionEngineRuntimeError -from inference.core.workflows.execution_engine.constants import TOP_LEVEL_LINEAGE_KEY, WORKFLOW_INPUT_BATCH_LINEAGE_ID +from inference.core.workflows.execution_engine.constants import ( + TOP_LEVEL_LINEAGE_KEY, + WORKFLOW_INPUT_BATCH_LINEAGE_ID, +) from inference.core.workflows.execution_engine.entities.base import JsonField from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, From fb704d369f92deb46ed5dc2e1ebe7ad16ca1542b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 22 Aug 2025 12:54:36 +0200 Subject: [PATCH 06/20] WIP - testing blocks accepting compound inputs --- .../v1/compiler/graph_constructor.py | 9 +- .../step_input_assembler.py | 61 +- .../plugin_image_producer/__init__.py | 293 +++- ...ng_scalars_to_fit_into_batch_parameters.py | 1253 ++++++++++++++++- 4 files changed, 1589 insertions(+), 27 deletions(-) diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index 8308a62cbf..a4eb9f637c 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -798,10 +798,7 @@ def denote_data_flow_for_step( output_dimensionality_offset=output_dimensionality_offset, ) ) - truly_batch_parameters = parameters_with_batch_inputs.difference( - scalar_parameters_to_be_batched - ) - if not truly_batch_parameters: + if not all_lineages: if manifest.get_output_dimensionality_offset() > 0: # brave decision to open a Pandora box data_lineage = [node] @@ -1631,14 +1628,14 @@ def get_input_data_lineage_excluding_auto_batch_casting( lineage_deduplication_set = set() lineages = [] for property_name, input_definition in input_data.items(): - if property_name in scalar_parameters_to_be_batched: - continue new_lineages_detected_within_property_data = 
get_lineage_for_input_property( step_name=step_name, property_name=property_name, input_definition=input_definition, lineage_deduplication_set=lineage_deduplication_set, ) + if property_name in scalar_parameters_to_be_batched and len(new_lineages_detected_within_property_data) == 0: + continue lineages.extend(new_lineages_detected_within_property_data) if not lineages: return lineages diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py index 057e5dbc77..e14a2c2c6c 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py @@ -360,6 +360,7 @@ def prepare_parameters( execution_cache: ExecutionCache, ) -> BatchModeSIMDStepInput: print("DDD", step_node.auto_batch_casting_lineage_supports) + step_requests_batch_input = step_node.step_manifest.accepts_batch_input() result = {} indices_for_parameter = {} guard_of_indices_wrapping = GuardForIndicesWrapping() @@ -380,6 +381,7 @@ def prepare_parameters( execution_cache=execution_cache, guard_of_indices_wrapping=guard_of_indices_wrapping, auto_batch_casting_lineage_supports=step_node.auto_batch_casting_lineage_supports, + step_requests_batch_input=step_requests_batch_input, ) compound_inputs.add(parameter_name) else: @@ -396,6 +398,7 @@ def prepare_parameters( execution_cache=execution_cache, guard_of_indices_wrapping=guard_of_indices_wrapping, auto_batch_casting_lineage_supports=step_node.auto_batch_casting_lineage_supports, + step_requests_batch_input=step_requests_batch_input, ) contains_empty_scalar_step_output_selector = ( contains_empty_scalar_step_output_selector @@ -440,6 +443,7 @@ def get_compound_parameter_value( execution_cache: ExecutionCache, guard_of_indices_wrapping: 
GuardForIndicesWrapping, auto_batch_casting_lineage_supports: Dict[str, AutoBatchCastingConfig], + step_requests_batch_input: bool, ) -> Tuple[Union[list, Dict[str, Any]], Optional[List[DynamicBatchIndex]], bool]: contains_empty_scalar_step_output_selector = False batch_indices = [] @@ -459,6 +463,7 @@ def get_compound_parameter_value( execution_cache=execution_cache, guard_of_indices_wrapping=guard_of_indices_wrapping, auto_batch_casting_lineage_supports=auto_batch_casting_lineage_supports, + step_requests_batch_input=step_requests_batch_input, ) result.append(non_compound_parameter_value) contains_empty_scalar_step_output_selector = ( @@ -483,6 +488,7 @@ def get_compound_parameter_value( execution_cache=execution_cache, guard_of_indices_wrapping=guard_of_indices_wrapping, auto_batch_casting_lineage_supports=auto_batch_casting_lineage_supports, + step_requests_batch_input=step_requests_batch_input, ) result[nested_element.parameter_specification.nested_element_key] = ( non_compound_parameter_value @@ -509,6 +515,7 @@ def get_non_compound_parameter_value( execution_cache: ExecutionCache, guard_of_indices_wrapping: GuardForIndicesWrapping, auto_batch_casting_lineage_supports: Dict[str, AutoBatchCastingConfig], + step_requests_batch_input: bool, ) -> Tuple[Any, Optional[List[DynamicBatchIndex]], bool]: if not parameter.is_batch_oriented(): requested_as_batch = ( @@ -533,6 +540,7 @@ def get_non_compound_parameter_value( dynamic_batches_manager=dynamic_batches_manager, step_execution_dimensionality=step_execution_dimensionality, guard_of_indices_wrapping=guard_of_indices_wrapping, + step_requests_batch_input=step_requests_batch_input, ) elif parameter.points_to_step_output(): input_parameter: DynamicStepInputDefinition = parameter # type: ignore @@ -552,6 +560,7 @@ def get_non_compound_parameter_value( dynamic_batches_manager=dynamic_batches_manager, step_execution_dimensionality=step_execution_dimensionality, guard_of_indices_wrapping=guard_of_indices_wrapping, + 
step_requests_batch_input=step_requests_batch_input, ) else: static_input: StaticStepInputDefinition = parameter # type: ignore @@ -568,6 +577,7 @@ def get_non_compound_parameter_value( dynamic_batches_manager=dynamic_batches_manager, step_execution_dimensionality=step_execution_dimensionality, guard_of_indices_wrapping=guard_of_indices_wrapping, + step_requests_batch_input=step_requests_batch_input, ) dynamic_parameter: DynamicStepInputDefinition = parameter # type: ignore parameter_dimensionality = dynamic_parameter.get_dimensionality() @@ -630,11 +640,25 @@ def get_non_compound_parameter_value( f"the problem - including workflow definition you use.", context="workflow_execution | step_input_assembling", ) - if step_execution_dimensionality == 0: + if step_execution_dimensionality == 0 and not step_requests_batch_input: return Batch(batch_input, lineage_indices), lineage_indices, False - upper_level_indices = dynamic_batches_manager.get_indices_for_data_lineage( - lineage=dynamic_parameter.data_lineage[:-1], - ) + upper_lineage = dynamic_parameter.data_lineage[:-1] + if len(upper_lineage) == 0: + if not step_requests_batch_input: + raise AssumptionError( + public_message=f"Parameter: {parameter.parameter_specification.parameter_name} " + f"requires dimensionality wrapping, but registered lineage support is incompatible " + f"which should be detected by the compiler. This is most likely a bug. 
" + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_input_assembling", + ) + upper_level_indices = [()] + else: + upper_level_indices = dynamic_batches_manager.get_indices_for_data_lineage( + lineage=dynamic_parameter.data_lineage[:-1], + ) result = reduce_batch_dimensionality( indices=lineage_indices, upper_level_index=upper_level_indices, @@ -652,6 +676,7 @@ def apply_auto_batch_casting( dynamic_batches_manager: DynamicBatchesManager, step_execution_dimensionality: int, guard_of_indices_wrapping: GuardForIndicesWrapping, + step_requests_batch_input: bool, ) -> Tuple[Any, List[DynamicBatchIndex], bool]: print( f"parameter_name: {parameter_name} - auto_batch_casting_config: {auto_batch_casting_config}" @@ -706,7 +731,7 @@ def apply_auto_batch_casting( upper_level_lineage_dimensionality = ( auto_batch_casting_config.casted_dimensionality - 1 ) - if upper_level_lineage_dimensionality == 0: + if upper_level_lineage_dimensionality == 0 and not step_requests_batch_input: # for batch collapse into scalar return created_batch, indices, contains_empty_scalar_step_output_selector if auto_batch_casting_config.lineage_support is None: @@ -719,19 +744,23 @@ def apply_auto_batch_casting( upper_level_lineage_dimensionality < 1 or len(upper_level_lineage) < upper_level_lineage_dimensionality ): - raise AssumptionError( - public_message=f"Detected a situation when parameter: {parameter_name} requires dimensionality " - f"wrapping, but registered lineage support is incompatible which should be detected " - f"by the compiler. This is most likely a bug. 
" - f"Contact Roboflow team through github issues " - f"(https://github.com/roboflow/inference/issues) providing full context of" - f"the problem - including workflow definition you use.", - context="workflow_execution | step_input_assembling", + if not step_requests_batch_input: + raise AssumptionError( + public_message=f"Detected a situation when parameter: {parameter_name} requires dimensionality " + f"wrapping, but registered lineage support is incompatible which should be detected " + f"by the compiler. This is most likely a bug. " + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_input_assembling", + ) + upper_level_indices = [()] + else: + upper_level_indices = dynamic_batches_manager.get_indices_for_data_lineage( + lineage=upper_level_lineage, ) - upper_level_indices = dynamic_batches_manager.get_indices_for_data_lineage( - lineage=upper_level_lineage, - ) print("REDUCTION!") + print("upper_level_indices", upper_level_indices) result = reduce_batch_dimensionality( indices=indices, upper_level_index=upper_level_indices, diff --git a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py index 5921efedf1..57daa34ea6 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py @@ -1,5 +1,5 @@ import json -from typing import Any, List, Literal, Optional, Tuple, Type +from typing import Any, List, Literal, Optional, Tuple, Type, Union, Dict from uuid import uuid4 import numpy as np @@ -249,6 +249,7 @@ class MultiNonSIMDImageConsumerDecreasingDimManifest(WorkflowBlockManifest): type: 
Literal["MultiNonSIMDImageConsumerDecreasingDim"] images_x: Selector(kind=[IMAGE_KIND]) images_y: Selector(kind=[IMAGE_KIND]) + additional: Union[Selector(), float] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: @@ -274,8 +275,9 @@ def get_manifest(cls) -> Type[WorkflowBlockManifest]: return MultiNonSIMDImageConsumerDecreasingDimManifest def run( - self, images_x: Batch[WorkflowImageData], images_y: Batch[WorkflowImageData] + self, images_x: Batch[WorkflowImageData], images_y: Batch[WorkflowImageData], additional: Any ) -> BlockResult: + assert not isinstance(additional, Batch) print("images_x", images_x, "images_y", images_y) results = [] for image_x, image_y in zip(images_x, images_y): @@ -286,6 +288,53 @@ def run( return {"shapes": "\n".join(results)} +class MultiSIMDImageConsumerDecreasingDimManifest(WorkflowBlockManifest): + type: Literal["MultiSIMDImageConsumerDecreasingDim"] + images_x: Selector(kind=[IMAGE_KIND]) + images_y: Selector(kind=[IMAGE_KIND]) + additional: Union[Selector(), float] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="shapes", kind=[STRING_KIND])] + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images_x", "images_y"] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset(cls) -> int: + return -1 + + +class MultiSIMDImageConsumerDecreasingDim(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return MultiSIMDImageConsumerDecreasingDimManifest + + def run( + self, images_x: Batch[Batch[WorkflowImageData]], images_y: Batch[Batch[WorkflowImageData]], additional: Any + ) -> BlockResult: + assert not isinstance(additional, Batch) + print("images_x", images_x, "images_y", images_y) + results = [] + for image_x_batch, image_y_batch in zip(images_x, images_y): + print("image_x_batch", 
image_x_batch, "image_x_batch", image_y_batch) + result = [] + for image_x, image_y in zip(image_x_batch, image_y_batch): + result.append( + json.dumps(image_x.numpy_image.shape) + + json.dumps(image_y.numpy_image.shape) + ) + results.append({"shapes": "\n".join(result)}) + return results + + class IdentityManifest(WorkflowBlockManifest): type: Literal["Identity"] x: Selector() @@ -337,6 +386,237 @@ def run(self, x: Batch[Any]) -> BlockResult: return [{"x": x_el} for x_el in x] +class BoostDimensionalityManifest(WorkflowBlockManifest): + type: Literal["BoostDimensionality"] + x: Selector() + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset(cls) -> int: + return 1 + + +class BoostDimensionalityBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BoostDimensionalityManifest + + def run(self, x: Any) -> BlockResult: + return [{"x": x}, {"x": x}] + + +class DoubleBoostDimensionalityManifest(WorkflowBlockManifest): + type: Literal["DoubleBoostDimensionality"] + x: Selector() + y: Selector() + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x"), OutputDefinition(name="y")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset(cls) -> int: + return 1 + + +class DoubleBoostDimensionalityBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return DoubleBoostDimensionalityManifest + + def run(self, x: Any, y: Any) -> BlockResult: + return [{"x": x, "y": y}, {"x": x, "y": y}] + + +class NonSIMDConsumerAcceptingListManifest(WorkflowBlockManifest): + type: Literal["NonSIMDConsumerAcceptingList"] + x: 
List[Selector(kind=[IMAGE_KIND])] + y: List[Selector(kind=[IMAGE_KIND])] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x"), OutputDefinition(name="y")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class NonSIMDConsumerAcceptingListBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return NonSIMDConsumerAcceptingListManifest + + def run(self, x: list, y: list) -> BlockResult: + return {"x": x, "y": y} + + +class NonSIMDConsumerAcceptingDictManifest(WorkflowBlockManifest): + type: Literal["NonSIMDConsumerAcceptingDict"] + x: Dict[str, Selector(kind=[IMAGE_KIND])] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class NonSIMDConsumerAcceptingDictBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return NonSIMDConsumerAcceptingDictManifest + + def run(self, x: dict) -> BlockResult: + sorted_keys = sorted(x.keys()) + return {"x": [x[k] for k in sorted_keys]} + + +class NonSIMDConsumerAcceptingListIncDimManifest(WorkflowBlockManifest): + type: Literal["NonSIMDConsumerAcceptingListIncDim"] + x: List[Selector(kind=[IMAGE_KIND])] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset( + cls, + ) -> int: + return 1 + + +class NonSIMDConsumerAcceptingListIncDimBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return NonSIMDConsumerAcceptingListIncDimManifest + + def run(self, x: list) -> BlockResult: + return [{"x": x}, {"x": x}] + + 
+class NonSIMDConsumerAcceptingDictIncDimManifest(WorkflowBlockManifest): + type: Literal["NonSIMDConsumerAcceptingDictIncDim"] + x: Dict[str, Selector(kind=[IMAGE_KIND])] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset( + cls, + ) -> int: + return 1 + + +class NonSIMDConsumerAcceptingDictIncDimBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return NonSIMDConsumerAcceptingDictIncDimManifest + + def run(self, x: dict) -> BlockResult: + sorted_keys = sorted(x.keys()) + return [{"x": [x[k] for k in sorted_keys]}, {"x": [x[k] for k in sorted_keys]}] + + +class NonSIMDConsumerAcceptingListDecDimManifest(WorkflowBlockManifest): + type: Literal["NonSIMDConsumerAcceptingListDecDim"] + x: List[Selector(kind=[IMAGE_KIND])] + y: Union[Selector(), str] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset( + cls, + ) -> int: + return -1 + + @classmethod + def get_parameters_enforcing_auto_batch_casting(cls) -> List[str]: + return ["x"] + + +class NonSIMDConsumerAcceptingListDecDimBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return NonSIMDConsumerAcceptingListDecDimManifest + + def run(self, x: Batch[list], y: str) -> BlockResult: + assert not isinstance(y, Batch) + return {"x": [f for e in x for f in e]} + + +class NonSIMDConsumerAcceptingDictDecDimManifest(WorkflowBlockManifest): + type: Literal["NonSIMDConsumerAcceptingDictDecDim"] + x: Dict[str, Selector(kind=[IMAGE_KIND])] + + @classmethod + def describe_outputs(cls) -> 
List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset( + cls, + ) -> int: + return -1 + + @classmethod + def get_parameters_enforcing_auto_batch_casting(cls) -> List[str]: + return ["x"] + + +class NonSIMDConsumerAcceptingDictDecDimBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return NonSIMDConsumerAcceptingDictDecDimManifest + + def run(self, x: dict) -> BlockResult: + results = [] + sorted_keys = sorted(x.keys()) + for k in sorted_keys: + v = x[k] + assert isinstance(v, Batch) + result = [e for e in v] + results.append(result) + return {"x": results} + + def load_blocks() -> List[Type[WorkflowBlock]]: return [ ImageProducerBlock, @@ -349,4 +629,13 @@ def load_blocks() -> List[Type[WorkflowBlock]]: IdentityBlock, IdentitySIMDBlock, MultiNonSIMDImageConsumerDecreasingDim, + MultiSIMDImageConsumerDecreasingDim, + BoostDimensionalityBlock, + DoubleBoostDimensionalityBlock, + NonSIMDConsumerAcceptingListBlock, + NonSIMDConsumerAcceptingDictBlock, + NonSIMDConsumerAcceptingListIncDimBlock, + NonSIMDConsumerAcceptingDictIncDimBlock, + NonSIMDConsumerAcceptingListDecDimBlock, + NonSIMDConsumerAcceptingDictDecDimBlock ] diff --git a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py index 2caa7ded22..e22bad1e6d 100644 --- a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py +++ b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py @@ -7,7 +7,7 @@ from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS from inference.core.managers.base import ModelManager from 
inference.core.workflows.core_steps.common.entities import StepExecutionMode -from inference.core.workflows.errors import StepInputDimensionalityError +from inference.core.workflows.errors import AssumptionError from inference.core.workflows.execution_engine.core import ExecutionEngine from inference.core.workflows.execution_engine.introspection import blocks_loader @@ -587,7 +587,9 @@ def test_workflow_with_scalar_producer_and_batch_input_feeding_non_simd_consumer WORKFLOW_WITH_NON_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS = { "version": "1.1", - "inputs": [], + "inputs": [ + {"type": "WorkflowParameter", "name": "confidence", "default_value": 0.3}, + ], "steps": [ { "type": "ImageProducer", @@ -608,6 +610,7 @@ def test_workflow_with_scalar_producer_and_batch_input_feeding_non_simd_consumer "name": "image_consumer", "images_x": "$steps.identity_simd.x", "images_y": "$steps.image_producer_y.image", + "additional": "$inputs.confidence" }, ], "outputs": [ @@ -649,7 +652,10 @@ def test_workflow_with_multiple_scalar_producers_feeding_non_simd_consumer_decre WORKFLOW_WITH_NON_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT = { "version": "1.1", - "inputs": [{"type": "WorkflowImage", "name": "image"}], + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + {"type": "WorkflowParameter", "name": "confidence", "default_value": 0.3}, + ], "steps": [ { "type": "ImageProducer", @@ -665,6 +671,7 @@ def test_workflow_with_multiple_scalar_producers_feeding_non_simd_consumer_decre "name": "image_consumer", "images_x": "$steps.identity_simd.x", "images_y": "$inputs.image", + "additional": "$inputs.confidence" }, ], "outputs": [ @@ -708,3 +715,1243 @@ def test_workflow_with_scalar_producer_and_batch_input_feeding_non_simd_consumer assert result == [ {"shapes": "[192, 168, 3][200, 100, 3]\n[192, 168, 3][300, 100, 3]"} ] + + +WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS = { + "version": "1.1", + "inputs": 
[ + {"type": "WorkflowParameter", "name": "confidence", "default_value": 0.3}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiSIMDImageConsumerDecreasingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image", + "additional": "$inputs.confidence" + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_multiple_scalar_producers_feeding_simd_consumer_decreasing_dim( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result[0]["shapes"] == "[192, 168, 3][220, 230, 3]" + + +WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + {"type": "WorkflowParameter", "name": "confidence", "default_value": 0.3}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + 
"name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "MultiSIMDImageConsumerDecreasingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$inputs.image", + "additional": "$inputs.confidence" + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_scalar_producer_and_batch_input_feeding_simd_consumer_decreasing_dim( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + image_1 = np.zeros((200, 100, 3), dtype=np.uint8) + image_2 = np.zeros((300, 100, 3), dtype=np.uint8) + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={"image": [image_1, image_2]}) + + # then + assert result == [ + {"shapes": "[192, 168, 3][200, 100, 3]\n[192, 168, 3][300, 100, 3]"} + ] + + +WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_BATCH_INPUTS_AT_DIM_1 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_2"}, + {"type": "WorkflowParameter", "name": "confidence", "default_value": 0.3}, + ], + "steps": [ + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$inputs.image_1", + }, + { + "type": "Identity", + "name": "identity_non_simd", + "x": "$inputs.image_2", + }, 
+ { + "type": "MultiSIMDImageConsumerDecreasingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.identity_non_simd.x", + "additional": "$inputs.confidence" + }, + { + "type": "IdentitySIMD", + "name": "identity_simd_2", + "x": "$steps.image_consumer.shapes", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.identity_simd_2.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_batched_inputs_at_dim_1_fed_into_consumer_decreasing_the_dimensionality( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + image_1 = np.zeros((200, 100, 3), dtype=np.uint8) + image_2 = np.zeros((300, 100, 3), dtype=np.uint8) + image_3 = np.zeros((400, 100, 3), dtype=np.uint8) + image_4 = np.zeros((500, 100, 3), dtype=np.uint8) + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_BATCH_INPUTS_AT_DIM_1, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={ + "image_1": [image_1, image_2], + "image_2": [image_3, image_4], + }) + + # then + assert result == [ + {"shapes": "[200, 100, 3][400, 100, 3]\n[300, 100, 3][500, 100, 3]"} + ] + + +WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_BATCH_INPUTS_AT_DIM_1_BOOSTING_DIM_AT_THE_END = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_2"}, + {"type": "WorkflowParameter", "name": "confidence", 
"default_value": 0.3}, + ], + "steps": [ + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$inputs.image_1", + }, + { + "type": "Identity", + "name": "identity_non_simd", + "x": "$inputs.image_2", + }, + { + "type": "MultiSIMDImageConsumerDecreasingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.identity_non_simd.x", + "additional": "$inputs.confidence" + }, + { + "type": "IdentitySIMD", + "name": "identity_simd_2", + "x": "$steps.image_consumer.shapes", + }, + { + "type": "BoostDimensionality", + "name": "dimensionality_boost", + "x": "$steps.identity_simd_2.x" + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.dimensionality_boost.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_batched_inputs_at_dim_1_fed_into_consumer_decreasing_the_dimensionality_and_boosting_scalar_dim_at_the_end( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # when + with pytest.raises(AssumptionError): + # TESTING CURRENT LIMITATION OF EE - WE CANNOT HAVE A BLOCK THAT YIELDS NEW 1ST LEVEL + # OF DIMENSIONALITY (WHICH IS DICTATED BY INPUTS)! 
+ _ = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_BATCH_INPUTS_AT_DIM_1_BOOSTING_DIM_AT_THE_END, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + +WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_BATCH_INPUTS_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_2"}, + {"type": "WorkflowParameter", "name": "confidence", "default_value": 0.3}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_2", + }, + { + "type": "MultiSIMDImageConsumerDecreasingDim", + "name": "image_consumer", + "images_x": "$steps.dimensionality_boost.x", + "images_y": "$steps.dimensionality_boost.y", + "additional": "$inputs.confidence" + }, + { + "type": "IdentitySIMD", + "name": "identity_simd_2", + "x": "$steps.image_consumer.shapes", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.identity_simd_2.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_batched_inputs_at_dim_2_fed_into_consumer_decreasing_the_dimensionality( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + image_1 = np.zeros((200, 100, 3), dtype=np.uint8) + image_2 = np.zeros((300, 100, 3), dtype=np.uint8) + image_3 = np.zeros((400, 100, 3), dtype=np.uint8) + image_4 = np.zeros((500, 100, 3), dtype=np.uint8) + execution_engine = ExecutionEngine.init( + 
workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_BATCH_INPUTS_AT_DIM_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={ + "image_1": [image_1, image_2], + "image_2": [image_3, image_4], + }) + + # then + assert result == [ + {"shapes": "[200, 100, 3][400, 100, 3]\n[200, 100, 3][400, 100, 3]"}, + {"shapes": "[300, 100, 3][500, 100, 3]\n[300, 100, 3][500, 100, 3]"} + ] + + +WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_INPUTS_BOOSTING_DIM_AT_THE_END = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowParameter", "name": "confidence", "default_value": 0.3}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + }, + { + "type": "MultiSIMDImageConsumerDecreasingDim", + "name": "image_consumer", + "images_x": "$steps.image_producer_x.image", + "images_y": "$steps.image_producer_y.image", + "additional": "$inputs.confidence" + }, + { + "type": "IdentitySIMD", + "name": "identity_simd_2", + "x": "$steps.image_consumer.shapes", + }, + { + "type": "BoostDimensionality", + "name": "dimensionality_boost", + "x": "$steps.identity_simd_2.x" + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.dimensionality_boost.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_scalar_inputs_fed_into_consumer_decreasing_the_dimensionality_and_boosting_scalar_dim_at_the_end( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": 
StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_INPUTS_BOOSTING_DIM_AT_THE_END, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert results == [ + {"shapes": "[192, 168, 3][192, 168, 3]"}, + {"shapes": "[192, 168, 3][192, 168, 3]"} + ] + + +WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_LIST_OF_SIMD_IMAGES = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (100, 100, 3) + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (200, 200, 3), + }, + { + "type": "ImageProducer", + "name": "image_producer_z", + "shape": (300, 300, 3), + }, + { + "type": "NonSIMDConsumerAcceptingList", + "name": "image_consumer", + "x": ["$steps.image_producer_x.image", "$steps.image_producer_y.image"], + "y": ["$steps.image_producer_z.image"], + }, + { + "type": "BoostDimensionality", + "name": "dimensionality_boost", + "x": "$steps.image_consumer.x" + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.dimensionality_boost.x", + }, + { + "type": "JsonField", + "name": "y", + "selector": "$steps.image_consumer.y", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumers_accepting_list_of_scalar_selector( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + 
workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_LIST_OF_SIMD_IMAGES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert len(results) == 2, "Expected dim increase to happen" + assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (200, 200, 3)] + assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 3)] + assert [i.numpy_image.shape for i in results[1]["x"]] == [(100, 100, 3), (200, 200, 3)] + assert [i.numpy_image.shape for i in results[1]["y"]] == [(300, 300, 3)] + + +WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_LIST_OF_BATCH_IMAGES = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_2"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "NonSIMDConsumerAcceptingList", + "name": "image_consumer", + "x": ["$inputs.image_1", "$inputs.image_2"], + "y": ["$inputs.image_3"], + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + { + "type": "JsonField", + "name": "y", + "selector": "$steps.image_consumer.y", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumers_accepting_list_of_batch_selector( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_LIST_OF_BATCH_IMAGES, + init_parameters=workflow_init_parameters, + 
max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_2": [np.zeros((200, 200, 3)), np.zeros((220, 220, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + }) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (200, 200, 3)] + assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 3)] + assert [i.numpy_image.shape for i in results[1]["x"]] == [(120, 120, 3), (220, 220, 3)] + assert [i.numpy_image.shape for i in results[1]["y"]] == [(320, 320, 3)] + + +WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": "NonSIMDConsumerAcceptingList", + "name": "image_consumer", + "x": ["$inputs.image_1", "$steps.image_producer_x.image"], + "y": ["$inputs.image_3"], + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + { + "type": "JsonField", + "name": "y", + "selector": "$steps.image_consumer.y", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumers_accepting_list_of_batch_and_scalar_selector( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + 
workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + }) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (50, 50, 3)] + assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 3)] + assert [i.numpy_image.shape for i in results[1]["x"]] == [(120, 120, 3), (50, 50, 3)] + assert [i.numpy_image.shape for i in results[1]["y"]] == [(320, 320, 3)] + + +WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": "NonSIMDConsumerAcceptingDict", + "name": "image_consumer", + "x": { + "a": "$inputs.image_1", + "b": "$steps.image_producer_x.image", + "c": "$inputs.image_3" + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumers_accepting_dict_of_batch_and_scalar_selector( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + 
workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + }) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] + assert [i.numpy_image.shape for i in results[1]["x"]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + + +WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": "NonSIMDConsumerAcceptingDict", + "name": "image_consumer", + "x": { + "a": "$steps.dimensionality_boost.x", + "b": "$steps.image_producer_x.image", + "c": "$steps.dimensionality_boost.y", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumers_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + 
execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + }) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] + assert [i.numpy_image.shape for i in results[1]["x"][0]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[1]["x"][1]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + + +WORKFLOW_WITH_SIMD_CUSTOMER_INCREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": "NonSIMDConsumerAcceptingDictIncDim", + "name": "image_consumer", + "x": { + "a": "$steps.dimensionality_boost.x", + "b": "$steps.image_producer_x.image", + "c": "$steps.dimensionality_boost.y", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_inc_dim_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + 
get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_INCREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + }) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"][0][0]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][0][1]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][1][0]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][1][1]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] + assert [i.numpy_image.shape for i in results[1]["x"][0][0]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[1]["x"][0][1]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[1]["x"][1][0]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[1]["x"][1][1]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", 
+ "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": "NonSIMDConsumerAcceptingDictDecDim", + "name": "image_consumer", + "x": { + "a": "$steps.dimensionality_boost.x", + "b": "$steps.image_producer_x.image", + "c": "$steps.dimensionality_boost.y", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + }) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [(100, 100, 3), (100, 100, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(50, 50, 3), (50, 50, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][2]] == [(300, 300, 3), (300, 300, 3)] + assert [i.numpy_image.shape for i in results[1]["x"][0]] == [(120, 120, 3), (120, 120, 3)] + assert 
[i.numpy_image.shape for i in results[1]["x"][1]] == [(50, 50, 3), (50, 50, 3)] + assert [i.numpy_image.shape for i in results[1]["x"][2]] == [(320, 320, 3), (320, 320, 3)] + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": "NonSIMDConsumerAcceptingDictDecDim", + "name": "image_consumer", + "x": { + "a": "$inputs.image_1", + "b": "$steps.image_producer_x.image", + "c": "$inputs.image_3", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_1( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + }) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [(100, 100, 3), 
(120, 120, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(50, 50, 3), (50, 50, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][2]] == [(300, 300, 3), (320, 320, 3)] + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_SCALARS = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (60, 60, 3) + }, + { + "type": "NonSIMDConsumerAcceptingDictDecDim", + "name": "image_consumer", + "x": { + "a": "$steps.image_producer_x.image", + "b": "$steps.image_producer_y.image", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selectors( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_SCALARS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [(50, 50, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(60, 60, 3)] + + +##################################### + 
+WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": "NonSIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$steps.dimensionality_boost.x", + "$steps.image_producer_x.image", + "$steps.dimensionality_boost.y", + ], + "y": "some-value" + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + }) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (100, 100, 3), 
(50, 50, 3), (50, 50, 3), (300, 300, 3), (300, 300, 3)] + assert [i.numpy_image.shape for i in results[1]["x"]] == [(120, 120, 3), (120, 120, 3), (50, 50, 3), (50, 50, 3), (320, 320, 3), (320, 320, 3)] + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": "NonSIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": ["$inputs.image_1", "$steps.image_producer_x.image", "$inputs.image_3"], + "y": "some-value" + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_1( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + }) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"]] 
== [(100, 100, 3), (120, 120, 3), (50, 50, 3), (50, 50, 3), (300, 300, 3), (320, 320, 3)] + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_SCALARS = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (60, 60, 3) + }, + { + "type": "NonSIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": ["$steps.image_producer_x.image", "$steps.image_producer_y.image"], + "y": "some-value" + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selectors( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_SCALARS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"]] == [(50, 50, 3), (60, 60, 3)] From 69ebbb17dbc8468addf35b6b209071a649f822fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 22 Aug 2025 13:47:43 +0200 Subject: [PATCH 07/20] WIP - testing blocks accepting compound inputs --- .../plugin_image_producer/__init__.py | 201 +++- 
...ng_scalars_to_fit_into_batch_parameters.py | 866 +++++++++++++++++- 2 files changed, 1041 insertions(+), 26 deletions(-) diff --git a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py index 57daa34ea6..129e297ade 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py @@ -1,4 +1,5 @@ import json +from collections import defaultdict from typing import Any, List, Literal, Optional, Tuple, Type, Union, Dict from uuid import uuid4 @@ -464,6 +465,47 @@ def run(self, x: list, y: list) -> BlockResult: return {"x": x, "y": y} +class SIMDConsumerAcceptingListManifest(WorkflowBlockManifest): + type: Literal["SIMDConsumerAcceptingList"] + x: List[Selector(kind=[IMAGE_KIND])] + y: List[Selector(kind=[IMAGE_KIND])] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x"), OutputDefinition(name="y")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["x", "y"] + + +class SIMDConsumerAcceptingListBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return SIMDConsumerAcceptingListManifest + + def run(self, x: List[Batch[WorkflowImageData]], y: List[Batch[WorkflowImageData]]) -> BlockResult: + idx2x = defaultdict(list) + idx2y = defaultdict(list) + for batch_x in x: + for idx, el in enumerate(batch_x): + idx2x[idx].append(el) + for batch_y in y: + for idx, el in enumerate(batch_y): + idx2y[idx].append(el) + indices_x = sorted(idx2x.keys()) + indices_y = sorted(idx2y.keys()) + assert indices_x == indices_y + results = [] + for idx in indices_x: + results.append({"x": 
idx2x[idx], "y": idx2y[idx]}) + return results + + class NonSIMDConsumerAcceptingDictManifest(WorkflowBlockManifest): type: Literal["NonSIMDConsumerAcceptingDict"] x: Dict[str, Selector(kind=[IMAGE_KIND])] @@ -487,6 +529,48 @@ def run(self, x: dict) -> BlockResult: return {"x": [x[k] for k in sorted_keys]} +class SIMDConsumerAcceptingDictManifest(WorkflowBlockManifest): + type: Literal["SIMDConsumerAcceptingDict"] + x: Dict[str, Selector(kind=[IMAGE_KIND])] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["x"] + + +class SIMDConsumerAcceptingDictBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return SIMDConsumerAcceptingDictManifest + + def run(self, x: Dict[str, Batch[Any]]) -> BlockResult: + sorted_keys = sorted(x.keys()) + keys_stashes = {k: {} for k in sorted_keys} + for key, key_batch in x.items(): + assert isinstance(key_batch, Batch) + for idx, key_batch_el in enumerate(key_batch): + keys_stashes[key][idx] = key_batch_el + reference_indices = None + for stash in keys_stashes.values(): + sorted_idx = sorted(stash.keys()) + if reference_indices is None: + reference_indices = sorted_idx + assert sorted_idx == reference_indices + assert reference_indices is not None + results = [] + for idx in reference_indices: + results.append({"x": [keys_stashes[k][idx] for k in sorted_keys]}) + return results + + class NonSIMDConsumerAcceptingListIncDimManifest(WorkflowBlockManifest): type: Literal["NonSIMDConsumerAcceptingListIncDim"] x: List[Selector(kind=[IMAGE_KIND])] @@ -544,6 +628,57 @@ def run(self, x: dict) -> BlockResult: return [{"x": [x[k] for k in sorted_keys]}, {"x": [x[k] for k in sorted_keys]}] +class SIMDConsumerAcceptingDictIncDimManifest(WorkflowBlockManifest): + 
type: Literal["SIMDConsumerAcceptingDictIncDim"] + x: Dict[str, Selector(kind=[IMAGE_KIND])] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset( + cls, + ) -> int: + return 1 + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["x"] + + +class SIMDConsumerAcceptingDictIncDimBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return SIMDConsumerAcceptingDictIncDimManifest + + def run(self, x: Dict[str, Batch[Any]]) -> BlockResult: + sorted_keys = sorted(x.keys()) + keys_stashes = {k: {} for k in sorted_keys} + for key, key_batch in x.items(): + assert isinstance(key_batch, Batch) + for idx, key_batch_el in enumerate(key_batch): + keys_stashes[key][idx] = key_batch_el + reference_indices = None + for stash in keys_stashes.values(): + sorted_idx = sorted(stash.keys()) + if reference_indices is None: + reference_indices = sorted_idx + assert sorted_idx == reference_indices + assert reference_indices is not None + results = [] + for idx in reference_indices: + results.append([ + {"x": [keys_stashes[k][idx] for k in sorted_keys]}, + {"x": [keys_stashes[k][idx] for k in sorted_keys]} + ]) + return results + + class NonSIMDConsumerAcceptingListDecDimManifest(WorkflowBlockManifest): type: Literal["NonSIMDConsumerAcceptingListDecDim"] x: List[Selector(kind=[IMAGE_KIND])] @@ -617,6 +752,66 @@ def run(self, x: dict) -> BlockResult: return {"x": results} +class SIMDConsumerAcceptingDictDecDimManifest(WorkflowBlockManifest): + type: Literal["SIMDConsumerAcceptingDictDecDim"] + x: Dict[str, Selector(kind=[IMAGE_KIND])] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) 
-> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset( + cls, + ) -> int: + return -1 + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["x"] + + +class SIMDConsumerAcceptingDictDecDimBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return SIMDConsumerAcceptingDictDecDimManifest + + def run(self, x: Dict[str, Batch[Batch[Any]]]) -> BlockResult: + sorted_keys = sorted(x.keys()) + keys_stashes = {k: {} for k in sorted_keys} + for key, key_batch in x.items(): + assert isinstance(key_batch, Batch) + for idx, key_batch_el in enumerate(key_batch): + assert isinstance(key_batch_el, Batch) + keys_stashes[key][idx] = list(key_batch_el) + reference_indices = None + for stash in keys_stashes.values(): + sorted_idx = sorted(stash.keys()) + if reference_indices is None: + reference_indices = sorted_idx + assert sorted_idx == reference_indices + assert reference_indices is not None + results = [] + for idx in reference_indices: + merged = [] + for k in sorted_keys: + merged.extend(keys_stashes[k][idx]) + results.append({"x": merged}) + return results + # results = [] + # sorted_keys = sorted(x.keys()) + # for k in sorted_keys: + # v = x[k] + # assert isinstance(v, Batch) + # result = [e for e in v] + # results.append(result) + # return {"x": results} + + def load_blocks() -> List[Type[WorkflowBlock]]: return [ ImageProducerBlock, @@ -637,5 +832,9 @@ def load_blocks() -> List[Type[WorkflowBlock]]: NonSIMDConsumerAcceptingListIncDimBlock, NonSIMDConsumerAcceptingDictIncDimBlock, NonSIMDConsumerAcceptingListDecDimBlock, - NonSIMDConsumerAcceptingDictDecDimBlock + NonSIMDConsumerAcceptingDictDecDimBlock, + SIMDConsumerAcceptingListBlock, + SIMDConsumerAcceptingDictBlock, + SIMDConsumerAcceptingDictIncDimBlock, + SIMDConsumerAcceptingDictDecDimBlock ] diff --git 
a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py index e22bad1e6d..eb626d6d1e 100644 --- a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py +++ b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py @@ -1142,7 +1142,7 @@ def test_workflow_with_scalar_inputs_fed_into_consumer_decreasing_the_dimensiona ] -WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_LIST_OF_SIMD_IMAGES = { +WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_LIST_OF_SCALAR_IMAGES = { "version": "1.1", "inputs": [], "steps": [ @@ -1188,6 +1188,819 @@ def test_workflow_with_scalar_inputs_fed_into_consumer_decreasing_the_dimensiona } +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumers_accepting_list_of_scalar_selector( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_LIST_OF_SCALAR_IMAGES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert len(results) == 2, "Expected dim increase to happen" + assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (200, 200, 3)] + assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 3)] + assert [i.numpy_image.shape for i in results[1]["x"]] 
== [(100, 100, 3), (200, 200, 3)] + assert [i.numpy_image.shape for i in results[1]["y"]] == [(300, 300, 3)] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_LIST_OF_BATCH_IMAGES = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_2"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "NonSIMDConsumerAcceptingList", + "name": "image_consumer", + "x": ["$inputs.image_1", "$inputs.image_2"], + "y": ["$inputs.image_3"], + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + { + "type": "JsonField", + "name": "y", + "selector": "$steps.image_consumer.y", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumers_accepting_list_of_batch_selector( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_LIST_OF_BATCH_IMAGES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_2": [np.zeros((200, 200, 3)), np.zeros((220, 220, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + }) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (200, 200, 3)] + assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 3)] + assert 
[i.numpy_image.shape for i in results[1]["x"]] == [(120, 120, 3), (220, 220, 3)] + assert [i.numpy_image.shape for i in results[1]["y"]] == [(320, 320, 3)] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": "NonSIMDConsumerAcceptingList", + "name": "image_consumer", + "x": ["$inputs.image_1", "$steps.image_producer_x.image"], + "y": ["$inputs.image_3"], + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + { + "type": "JsonField", + "name": "y", + "selector": "$steps.image_consumer.y", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumers_accepting_list_of_batch_and_scalar_selector( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + }) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (50, 50, 3)] + assert [i.numpy_image.shape 
for i in results[0]["y"]] == [(300, 300, 3)] + assert [i.numpy_image.shape for i in results[1]["x"]] == [(120, 120, 3), (50, 50, 3)] + assert [i.numpy_image.shape for i in results[1]["y"]] == [(320, 320, 3)] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": "NonSIMDConsumerAcceptingDict", + "name": "image_consumer", + "x": { + "a": "$inputs.image_1", + "b": "$steps.image_producer_x.image", + "c": "$inputs.image_3" + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumers_accepting_dict_of_batch_and_scalar_selector( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + }) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] + assert [i.numpy_image.shape for 
i in results[1]["x"]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": "NonSIMDConsumerAcceptingDict", + "name": "image_consumer", + "x": { + "a": "$steps.dimensionality_boost.x", + "b": "$steps.image_producer_x.image", + "c": "$steps.dimensionality_boost.y", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumers_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + }) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [(100, 100, 
3), (50, 50, 3), (300, 300, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] + assert [i.numpy_image.shape for i in results[1]["x"][0]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[1]["x"][1]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_INCREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": "NonSIMDConsumerAcceptingDictIncDim", + "name": "image_consumer", + "x": { + "a": "$steps.dimensionality_boost.x", + "b": "$steps.image_producer_x.image", + "c": "$steps.dimensionality_boost.y", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumer_inc_dim_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_INCREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, + init_parameters=workflow_init_parameters, + 
max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + }) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"][0][0]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][0][1]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][1][0]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][1][1]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] + assert [i.numpy_image.shape for i in results[1]["x"][0][0]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[1]["x"][0][1]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[1]["x"][1][0]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[1]["x"][1][1]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": "NonSIMDConsumerAcceptingDictDecDim", + "name": "image_consumer", + "x": { + "a": "$steps.dimensionality_boost.x", + "b": "$steps.image_producer_x.image", + "c": "$steps.dimensionality_boost.y", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + 
+@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + }) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [(100, 100, 3), (100, 100, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(50, 50, 3), (50, 50, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][2]] == [(300, 300, 3), (300, 300, 3)] + assert [i.numpy_image.shape for i in results[1]["x"][0]] == [(120, 120, 3), (120, 120, 3)] + assert [i.numpy_image.shape for i in results[1]["x"][1]] == [(50, 50, 3), (50, 50, 3)] + assert [i.numpy_image.shape for i in results[1]["x"][2]] == [(320, 320, 3), (320, 320, 3)] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + 
{ + "type": "NonSIMDConsumerAcceptingDictDecDim", + "name": "image_consumer", + "x": { + "a": "$inputs.image_1", + "b": "$steps.image_producer_x.image", + "c": "$inputs.image_3", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_1( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + }) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [(100, 100, 3), (120, 120, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(50, 50, 3), (50, 50, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][2]] == [(300, 300, 3), (320, 320, 3)] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_SCALARS = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (60, 60, 3) + }, + { 
+ "type": "NonSIMDConsumerAcceptingDictDecDim", + "name": "image_consumer", + "x": { + "a": "$steps.image_producer_x.image", + "b": "$steps.image_producer_y.image", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selectors( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_SCALARS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [(50, 50, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(60, 60, 3)] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": "NonSIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$steps.dimensionality_boost.x", + 
"$steps.image_producer_x.image", + "$steps.dimensionality_boost.y", + ], + "y": "some-value" + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + }) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (100, 100, 3), (50, 50, 3), (50, 50, 3), (300, 300, 3), (300, 300, 3)] + assert [i.numpy_image.shape for i in results[1]["x"]] == [(120, 120, 3), (120, 120, 3), (50, 50, 3), (50, 50, 3), (320, 320, 3), (320, 320, 3)] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": 
"NonSIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": ["$inputs.image_1", "$steps.image_producer_x.image", "$inputs.image_3"], + "y": "some-value" + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_1( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + }) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (120, 120, 3), (50, 50, 3), (50, 50, 3), (300, 300, 3), (320, 320, 3)] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_SCALARS = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (50, 50, 3) + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (60, 60, 3) + }, + { + "type": "NonSIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": ["$steps.image_producer_x.image", 
"$steps.image_producer_y.image"], + "y": "some-value" + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selectors( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_SCALARS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"]] == [(50, 50, 3), (60, 60, 3)] + + +WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_LIST_OF_SCALAR_IMAGES = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (100, 100, 3) + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (200, 200, 3), + }, + { + "type": "ImageProducer", + "name": "image_producer_z", + "shape": (300, 300, 3), + }, + { + "type": "SIMDConsumerAcceptingList", + "name": "image_consumer", + "x": ["$steps.image_producer_x.image", "$steps.image_producer_y.image"], + "y": ["$steps.image_producer_z.image"], + }, + { + "type": "BoostDimensionality", + "name": "dimensionality_boost", + "x": "$steps.image_consumer.x" + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.dimensionality_boost.x", + }, + { + 
"type": "JsonField", + "name": "y", + "selector": "$steps.image_consumer.y", + }, + ], +} + + @mock.patch.object(blocks_loader, "get_plugin_modules") def test_workflow_with_simd_consumers_accepting_list_of_scalar_selector( get_plugin_modules_mock: MagicMock, @@ -1203,7 +2016,7 @@ def test_workflow_with_simd_consumers_accepting_list_of_scalar_selector( "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } execution_engine = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_LIST_OF_SIMD_IMAGES, + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_LIST_OF_SCALAR_IMAGES, init_parameters=workflow_init_parameters, max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, ) @@ -1228,7 +2041,7 @@ def test_workflow_with_simd_consumers_accepting_list_of_scalar_selector( ], "steps": [ { - "type": "NonSIMDConsumerAcceptingList", + "type": "SIMDConsumerAcceptingList", "name": "image_consumer", "x": ["$inputs.image_1", "$inputs.image_2"], "y": ["$inputs.image_3"], @@ -1297,7 +2110,7 @@ def test_workflow_with_simd_consumers_accepting_list_of_batch_selector( "shape": (50, 50, 3) }, { - "type": "NonSIMDConsumerAcceptingList", + "type": "SIMDConsumerAcceptingList", "name": "image_consumer", "x": ["$inputs.image_1", "$steps.image_producer_x.image"], "y": ["$inputs.image_3"], @@ -1365,7 +2178,7 @@ def test_workflow_with_simd_consumers_accepting_list_of_batch_and_scalar_selecto "shape": (50, 50, 3) }, { - "type": "NonSIMDConsumerAcceptingDict", + "type": "SIMDConsumerAcceptingDict", "name": "image_consumer", "x": { "a": "$inputs.image_1", @@ -1435,7 +2248,7 @@ def test_workflow_with_simd_consumers_accepting_dict_of_batch_and_scalar_selecto "shape": (50, 50, 3) }, { - "type": "NonSIMDConsumerAcceptingDict", + "type": "SIMDConsumerAcceptingDict", "name": "image_consumer", "x": { "a": "$steps.dimensionality_boost.x", @@ -1488,6 +2301,9 @@ def test_workflow_with_simd_consumers_accepting_dict_of_batch_and_scalar_selecto assert 
[i.numpy_image.shape for i in results[1]["x"][1]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] +######### ============ + + WORKFLOW_WITH_SIMD_CUSTOMER_INCREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { "version": "1.1", "inputs": [ @@ -1507,7 +2323,7 @@ def test_workflow_with_simd_consumers_accepting_dict_of_batch_and_scalar_selecto "shape": (50, 50, 3) }, { - "type": "NonSIMDConsumerAcceptingDictIncDim", + "type": "SIMDConsumerAcceptingDictIncDim", "name": "image_consumer", "x": { "a": "$steps.dimensionality_boost.x", @@ -1583,7 +2399,7 @@ def test_workflow_with_simd_consumer_inc_dim_accepting_dict_of_batch_and_scalar_ "shape": (50, 50, 3) }, { - "type": "NonSIMDConsumerAcceptingDictDecDim", + "type": "SIMDConsumerAcceptingDictDecDim", "name": "image_consumer", "x": { "a": "$steps.dimensionality_boost.x", @@ -1629,6 +2445,7 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_ }) # then + print(results) assert len(results) == 2 assert [i.numpy_image.shape for i in results[0]["x"][0]] == [(100, 100, 3), (100, 100, 3)] assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(50, 50, 3), (50, 50, 3)] @@ -1638,7 +2455,7 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_ assert [i.numpy_image.shape for i in results[1]["x"][2]] == [(320, 320, 3), (320, 320, 3)] -WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1 = { +WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1 = { "version": "1.1", "inputs": [ {"type": "WorkflowImage", "name": "image_1"}, @@ -1671,7 +2488,7 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_ @mock.patch.object(blocks_loader, "get_plugin_modules") -def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_1( +def 
test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_1( get_plugin_modules_mock: MagicMock, model_manager: ModelManager, ) -> None: @@ -1685,7 +2502,7 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_ "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } execution_engine = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1, + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1, init_parameters=workflow_init_parameters, max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, ) @@ -1703,7 +2520,7 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_ assert [i.numpy_image.shape for i in results[0]["x"][2]] == [(300, 300, 3), (320, 320, 3)] -WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_SCALARS = { +WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_SCALARS = { "version": "1.1", "inputs": [], "steps": [ @@ -1737,7 +2554,7 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_ @mock.patch.object(blocks_loader, "get_plugin_modules") -def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selectors( +def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selectors( get_plugin_modules_mock: MagicMock, model_manager: ModelManager, ) -> None: @@ -1751,7 +2568,7 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_ "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } execution_engine = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_SCALARS, + 
workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_SCALARS, init_parameters=workflow_init_parameters, max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, ) @@ -1765,9 +2582,7 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_ assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(60, 60, 3)] -##################################### - -WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { +WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { "version": "1.1", "inputs": [ {"type": "WorkflowImage", "name": "image_1"}, @@ -1807,7 +2622,7 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_ @mock.patch.object(blocks_loader, "get_plugin_modules") -def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_2( +def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_2( get_plugin_modules_mock: MagicMock, model_manager: ModelManager, ) -> None: @@ -1821,7 +2636,7 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } execution_engine = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, init_parameters=workflow_init_parameters, max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, ) @@ -1838,7 +2653,7 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ assert [i.numpy_image.shape for i in results[1]["x"]] == [(120, 120, 3), (120, 120, 3), (50, 50, 3), (50, 50, 3), (320, 320, 3), (320, 320, 3)] 
-WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1 = { +WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1 = { "version": "1.1", "inputs": [ {"type": "WorkflowImage", "name": "image_1"}, @@ -1868,7 +2683,7 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ @mock.patch.object(blocks_loader, "get_plugin_modules") -def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_1( +def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_1( get_plugin_modules_mock: MagicMock, model_manager: ModelManager, ) -> None: @@ -1882,7 +2697,7 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } execution_engine = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1, + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1, init_parameters=workflow_init_parameters, max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, ) @@ -1898,7 +2713,7 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (120, 120, 3), (50, 50, 3), (50, 50, 3), (300, 300, 3), (320, 320, 3)] -WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_SCALARS = { +WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_SCALARS = { "version": "1.1", "inputs": [], "steps": [ @@ -1930,7 +2745,7 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ @mock.patch.object(blocks_loader, "get_plugin_modules") -def 
test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selectors( +def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selectors( get_plugin_modules_mock: MagicMock, model_manager: ModelManager, ) -> None: @@ -1944,7 +2759,7 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } execution_engine = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_SCALARS, + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_SCALARS, init_parameters=workflow_init_parameters, max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, ) @@ -1955,3 +2770,4 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ # then assert len(results) == 1 assert [i.numpy_image.shape for i in results[0]["x"]] == [(50, 50, 3), (60, 60, 3)] + From 55211a271bd19a2cd697571fa2ecf2d73ebab86b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 22 Aug 2025 14:17:08 +0200 Subject: [PATCH 08/20] Finish testing alignment of ABC with dimensionality manipulations --- .../v1/compiler/graph_constructor.py | 5 +- .../step_input_assembler.py | 10 +- .../unit_tests/core/utils/test_file_system.py | 32 +- .../plugin_image_producer/__init__.py | 87 +- ...ng_scalars_to_fit_into_batch_parameters.py | 838 +++++++++++------- .../transformations/test_qr_code_generator.py | 124 +-- 6 files changed, 675 insertions(+), 421 deletions(-) diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index a4eb9f637c..802e333d56 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -1634,7 +1634,10 @@ def 
get_input_data_lineage_excluding_auto_batch_casting( input_definition=input_definition, lineage_deduplication_set=lineage_deduplication_set, ) - if property_name in scalar_parameters_to_be_batched and len(new_lineages_detected_within_property_data) == 0: + if ( + property_name in scalar_parameters_to_be_batched + and len(new_lineages_detected_within_property_data) == 0 + ): continue lineages.extend(new_lineages_detected_within_property_data) if not lineages: diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py index e14a2c2c6c..457ac4df7c 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py @@ -647,11 +647,11 @@ def get_non_compound_parameter_value( if not step_requests_batch_input: raise AssumptionError( public_message=f"Parameter: {parameter.parameter_specification.parameter_name} " - f"requires dimensionality wrapping, but registered lineage support is incompatible " - f"which should be detected by the compiler. This is most likely a bug. " - f"Contact Roboflow team through github issues " - f"(https://github.com/roboflow/inference/issues) providing full context of" - f"the problem - including workflow definition you use.", + f"requires dimensionality wrapping, but registered lineage support is incompatible " + f"which should be detected by the compiler. This is most likely a bug. 
" + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", context="workflow_execution | step_input_assembling", ) upper_level_indices = [()] diff --git a/tests/inference/unit_tests/core/utils/test_file_system.py b/tests/inference/unit_tests/core/utils/test_file_system.py index 68b2e83c9e..7792a300e3 100644 --- a/tests/inference/unit_tests/core/utils/test_file_system.py +++ b/tests/inference/unit_tests/core/utils/test_file_system.py @@ -449,7 +449,7 @@ def test_atomic_path_with_existing_file_override(empty_local_dir: str) -> None: target_path = os.path.join(empty_local_dir, "test.txt") original_content = "original" new_content = "new content" - + with open(target_path, "w") as f: f.write(original_content) @@ -511,7 +511,7 @@ def test_dump_json_atomic_when_file_exists_with_override(empty_local_dir: str) - file_path = os.path.join(empty_local_dir, "test.json") original_content = {"old": "data"} new_content = {"new": "data"} - + with open(file_path, "w") as f: json.dump(original_content, f) @@ -569,7 +569,9 @@ def test_dump_text_lines_atomic_when_file_does_not_exist(empty_local_dir: str) - assert f.read() == "line1\nline2\nline3" -def test_dump_text_lines_atomic_when_file_exists_no_override(empty_local_dir: str) -> None: +def test_dump_text_lines_atomic_when_file_exists_no_override( + empty_local_dir: str, +) -> None: # given file_path = os.path.join(empty_local_dir, "test.txt") touch(file_path) @@ -579,12 +581,14 @@ def test_dump_text_lines_atomic_when_file_exists_no_override(empty_local_dir: st dump_text_lines_atomic(path=file_path, content=["line1"], allow_override=False) -def test_dump_text_lines_atomic_when_file_exists_with_override(empty_local_dir: str) -> None: +def test_dump_text_lines_atomic_when_file_exists_with_override( + empty_local_dir: str, +) -> None: # given file_path = os.path.join(empty_local_dir, "test.txt") with 
open(file_path, "w") as f: f.write("original content") - + new_content = ["new", "lines"] # when @@ -654,7 +658,7 @@ def test_dump_bytes_atomic_when_file_exists_with_override(empty_local_dir: str) file_path = os.path.join(empty_local_dir, "test.bin") with open(file_path, "wb") as f: f.write(b"original data") - + new_content = b"new binary data" # when @@ -687,14 +691,14 @@ def test_atomic_write_maintains_original_on_error(empty_local_dir: str) -> None: # given file_path = os.path.join(empty_local_dir, "test.txt") original_content = "original content that should be preserved" - + with open(file_path, "w") as f: f.write(original_content) - + # when - simulate a write error by mocking class WriteError(Exception): pass - + try: with AtomicPath(file_path, allow_override=True) as temp_path: with open(temp_path, "w") as f: @@ -703,7 +707,7 @@ class WriteError(Exception): raise WriteError("Simulated write failure") except WriteError: pass - + # then - original file should be unchanged assert os.path.exists(file_path) with open(file_path) as f: @@ -714,20 +718,20 @@ def test_atomic_operations_concurrent_safety(empty_local_dir: str) -> None: """Test that temp files don't collide when multiple atomic writes happen""" # given target_path = os.path.join(empty_local_dir, "test.txt") - + # when - create multiple atomic writes to same target temp_paths = [] contexts = [] - + for i in range(3): ctx = AtomicPath(target_path, allow_override=True) temp_path = ctx.__enter__() temp_paths.append(temp_path) contexts.append(ctx) - + # then - all temp paths should be unique assert len(set(temp_paths)) == 3 - + # cleanup for ctx, temp_path in zip(contexts, temp_paths): try: diff --git a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py index 129e297ade..1349bf2d80 100644 --- 
a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py @@ -1,6 +1,6 @@ import json from collections import defaultdict -from typing import Any, List, Literal, Optional, Tuple, Type, Union, Dict +from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union from uuid import uuid4 import numpy as np @@ -276,7 +276,10 @@ def get_manifest(cls) -> Type[WorkflowBlockManifest]: return MultiNonSIMDImageConsumerDecreasingDimManifest def run( - self, images_x: Batch[WorkflowImageData], images_y: Batch[WorkflowImageData], additional: Any + self, + images_x: Batch[WorkflowImageData], + images_y: Batch[WorkflowImageData], + additional: Any, ) -> BlockResult: assert not isinstance(additional, Batch) print("images_x", images_x, "images_y", images_y) @@ -319,7 +322,10 @@ def get_manifest(cls) -> Type[WorkflowBlockManifest]: return MultiSIMDImageConsumerDecreasingDimManifest def run( - self, images_x: Batch[Batch[WorkflowImageData]], images_y: Batch[Batch[WorkflowImageData]], additional: Any + self, + images_x: Batch[Batch[WorkflowImageData]], + images_y: Batch[Batch[WorkflowImageData]], + additional: Any, ) -> BlockResult: assert not isinstance(additional, Batch) print("images_x", images_x, "images_y", images_y) @@ -488,7 +494,9 @@ class SIMDConsumerAcceptingListBlock(WorkflowBlock): def get_manifest(cls) -> Type[WorkflowBlockManifest]: return SIMDConsumerAcceptingListManifest - def run(self, x: List[Batch[WorkflowImageData]], y: List[Batch[WorkflowImageData]]) -> BlockResult: + def run( + self, x: List[Batch[WorkflowImageData]], y: List[Batch[WorkflowImageData]] + ) -> BlockResult: idx2x = defaultdict(list) idx2y = defaultdict(list) for batch_x in x: @@ -672,10 +680,12 @@ def run(self, x: Dict[str, Batch[Any]]) -> BlockResult: assert reference_indices is not None results = [] for idx in reference_indices: - results.append([ - {"x": 
[keys_stashes[k][idx] for k in sorted_keys]}, - {"x": [keys_stashes[k][idx] for k in sorted_keys]} - ]) + results.append( + [ + {"x": [keys_stashes[k][idx] for k in sorted_keys]}, + {"x": [keys_stashes[k][idx] for k in sorted_keys]}, + ] + ) return results @@ -694,7 +704,7 @@ def get_execution_engine_compatibility(cls) -> Optional[str]: @classmethod def get_output_dimensionality_offset( - cls, + cls, ) -> int: return -1 @@ -713,6 +723,48 @@ def run(self, x: Batch[list], y: str) -> BlockResult: return {"x": [f for e in x for f in e]} +class SIMDConsumerAcceptingListDecDimManifest(WorkflowBlockManifest): + type: Literal["SIMDConsumerAcceptingListDecDim"] + x: List[Selector(kind=[IMAGE_KIND])] + y: Union[Selector(), str] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset( + cls, + ) -> int: + return -1 + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["x"] + + +class SIMDConsumerAcceptingListDecDimBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return SIMDConsumerAcceptingListDecDimManifest + + def run(self, x: List[Batch[Batch[WorkflowImageData]]], y: str) -> BlockResult: + assert not isinstance(y, Batch) + idx2x = defaultdict(list) + for batch_x in x: + for idx, el in enumerate(batch_x): + idx2x[idx].extend(list(el)) + indices_x = sorted(idx2x.keys()) + results = [] + for idx in indices_x: + results.append({"x": idx2x[idx]}) + return results + + class NonSIMDConsumerAcceptingDictDecDimManifest(WorkflowBlockManifest): type: Literal["NonSIMDConsumerAcceptingDictDecDim"] x: Dict[str, Selector(kind=[IMAGE_KIND])] @@ -727,7 +779,7 @@ def get_execution_engine_compatibility(cls) -> Optional[str]: @classmethod def get_output_dimensionality_offset( - cls, + cls, ) -> int: 
return -1 @@ -766,7 +818,7 @@ def get_execution_engine_compatibility(cls) -> Optional[str]: @classmethod def get_output_dimensionality_offset( - cls, + cls, ) -> int: return -1 @@ -799,17 +851,9 @@ def run(self, x: Dict[str, Batch[Batch[Any]]]) -> BlockResult: for idx in reference_indices: merged = [] for k in sorted_keys: - merged.extend(keys_stashes[k][idx]) + merged.append(keys_stashes[k][idx]) results.append({"x": merged}) return results - # results = [] - # sorted_keys = sorted(x.keys()) - # for k in sorted_keys: - # v = x[k] - # assert isinstance(v, Batch) - # result = [e for e in v] - # results.append(result) - # return {"x": results} def load_blocks() -> List[Type[WorkflowBlock]]: @@ -836,5 +880,6 @@ def load_blocks() -> List[Type[WorkflowBlock]]: SIMDConsumerAcceptingListBlock, SIMDConsumerAcceptingDictBlock, SIMDConsumerAcceptingDictIncDimBlock, - SIMDConsumerAcceptingDictDecDimBlock + SIMDConsumerAcceptingDictDecDimBlock, + SIMDConsumerAcceptingListDecDimBlock, ] diff --git a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py index eb626d6d1e..3376226ff0 100644 --- a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py +++ b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py @@ -610,7 +610,7 @@ def test_workflow_with_scalar_producer_and_batch_input_feeding_non_simd_consumer "name": "image_consumer", "images_x": "$steps.identity_simd.x", "images_y": "$steps.image_producer_y.image", - "additional": "$inputs.confidence" + "additional": "$inputs.confidence", }, ], "outputs": [ @@ -671,7 +671,7 @@ def test_workflow_with_multiple_scalar_producers_feeding_non_simd_consumer_decre "name": "image_consumer", "images_x": "$steps.identity_simd.x", "images_y": "$inputs.image", - "additional": 
"$inputs.confidence" + "additional": "$inputs.confidence", }, ], "outputs": [ @@ -742,7 +742,7 @@ def test_workflow_with_scalar_producer_and_batch_input_feeding_non_simd_consumer "name": "image_consumer", "images_x": "$steps.identity_simd.x", "images_y": "$steps.image_producer_y.image", - "additional": "$inputs.confidence" + "additional": "$inputs.confidence", }, ], "outputs": [ @@ -803,7 +803,7 @@ def test_workflow_with_multiple_scalar_producers_feeding_simd_consumer_decreasin "name": "image_consumer", "images_x": "$steps.identity_simd.x", "images_y": "$inputs.image", - "additional": "$inputs.confidence" + "additional": "$inputs.confidence", }, ], "outputs": [ @@ -870,7 +870,7 @@ def test_workflow_with_scalar_producer_and_batch_input_feeding_simd_consumer_dec "name": "image_consumer", "images_x": "$steps.identity_simd.x", "images_y": "$steps.identity_non_simd.x", - "additional": "$inputs.confidence" + "additional": "$inputs.confidence", }, { "type": "IdentitySIMD", @@ -913,10 +913,12 @@ def test_workflow_with_batched_inputs_at_dim_1_fed_into_consumer_decreasing_the_ ) # when - result = execution_engine.run(runtime_parameters={ - "image_1": [image_1, image_2], - "image_2": [image_3, image_4], - }) + result = execution_engine.run( + runtime_parameters={ + "image_1": [image_1, image_2], + "image_2": [image_3, image_4], + } + ) # then assert result == [ @@ -947,7 +949,7 @@ def test_workflow_with_batched_inputs_at_dim_1_fed_into_consumer_decreasing_the_ "name": "image_consumer", "images_x": "$steps.identity_simd.x", "images_y": "$steps.identity_non_simd.x", - "additional": "$inputs.confidence" + "additional": "$inputs.confidence", }, { "type": "IdentitySIMD", @@ -957,8 +959,8 @@ def test_workflow_with_batched_inputs_at_dim_1_fed_into_consumer_decreasing_the_ { "type": "BoostDimensionality", "name": "dimensionality_boost", - "x": "$steps.identity_simd_2.x" - } + "x": "$steps.identity_simd_2.x", + }, ], "outputs": [ { @@ -1015,7 +1017,7 @@ def 
test_workflow_with_batched_inputs_at_dim_1_fed_into_consumer_decreasing_the_ "name": "image_consumer", "images_x": "$steps.dimensionality_boost.x", "images_y": "$steps.dimensionality_boost.y", - "additional": "$inputs.confidence" + "additional": "$inputs.confidence", }, { "type": "IdentitySIMD", @@ -1058,15 +1060,17 @@ def test_workflow_with_batched_inputs_at_dim_2_fed_into_consumer_decreasing_the_ ) # when - result = execution_engine.run(runtime_parameters={ - "image_1": [image_1, image_2], - "image_2": [image_3, image_4], - }) + result = execution_engine.run( + runtime_parameters={ + "image_1": [image_1, image_2], + "image_2": [image_3, image_4], + } + ) # then assert result == [ {"shapes": "[200, 100, 3][400, 100, 3]\n[200, 100, 3][400, 100, 3]"}, - {"shapes": "[300, 100, 3][500, 100, 3]\n[300, 100, 3][500, 100, 3]"} + {"shapes": "[300, 100, 3][500, 100, 3]\n[300, 100, 3][500, 100, 3]"}, ] @@ -1089,7 +1093,7 @@ def test_workflow_with_batched_inputs_at_dim_2_fed_into_consumer_decreasing_the_ "name": "image_consumer", "images_x": "$steps.image_producer_x.image", "images_y": "$steps.image_producer_y.image", - "additional": "$inputs.confidence" + "additional": "$inputs.confidence", }, { "type": "IdentitySIMD", @@ -1099,8 +1103,8 @@ def test_workflow_with_batched_inputs_at_dim_2_fed_into_consumer_decreasing_the_ { "type": "BoostDimensionality", "name": "dimensionality_boost", - "x": "$steps.identity_simd_2.x" - } + "x": "$steps.identity_simd_2.x", + }, ], "outputs": [ { @@ -1138,7 +1142,7 @@ def test_workflow_with_scalar_inputs_fed_into_consumer_decreasing_the_dimensiona # then assert results == [ {"shapes": "[192, 168, 3][192, 168, 3]"}, - {"shapes": "[192, 168, 3][192, 168, 3]"} + {"shapes": "[192, 168, 3][192, 168, 3]"}, ] @@ -1146,11 +1150,7 @@ def test_workflow_with_scalar_inputs_fed_into_consumer_decreasing_the_dimensiona "version": "1.1", "inputs": [], "steps": [ - { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (100, 100, 3) - }, + 
{"type": "ImageProducer", "name": "image_producer_x", "shape": (100, 100, 3)}, { "type": "ImageProducer", "name": "image_producer_y", @@ -1170,8 +1170,8 @@ def test_workflow_with_scalar_inputs_fed_into_consumer_decreasing_the_dimensiona { "type": "BoostDimensionality", "name": "dimensionality_boost", - "x": "$steps.image_consumer.x" - } + "x": "$steps.image_consumer.x", + }, ], "outputs": [ { @@ -1213,9 +1213,15 @@ def test_workflow_with_non_simd_consumers_accepting_list_of_scalar_selector( # then assert len(results) == 2, "Expected dim increase to happen" - assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (200, 200, 3)] + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (200, 200, 3), + ] assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 3)] - assert [i.numpy_image.shape for i in results[1]["x"]] == [(100, 100, 3), (200, 200, 3)] + assert [i.numpy_image.shape for i in results[1]["x"]] == [ + (100, 100, 3), + (200, 200, 3), + ] assert [i.numpy_image.shape for i in results[1]["y"]] == [(300, 300, 3)] @@ -1270,17 +1276,25 @@ def test_workflow_with_non_simd_consumers_accepting_list_of_batch_selector( ) # when - results = execution_engine.run(runtime_parameters={ - "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], - "image_2": [np.zeros((200, 200, 3)), np.zeros((220, 220, 3))], - "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], - }) + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_2": [np.zeros((200, 200, 3)), np.zeros((220, 220, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) # then assert len(results) == 2 - assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (200, 200, 3)] + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (200, 200, 3), + ] assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 
3)] - assert [i.numpy_image.shape for i in results[1]["x"]] == [(120, 120, 3), (220, 220, 3)] + assert [i.numpy_image.shape for i in results[1]["x"]] == [ + (120, 120, 3), + (220, 220, 3), + ] assert [i.numpy_image.shape for i in results[1]["y"]] == [(320, 320, 3)] @@ -1291,11 +1305,7 @@ def test_workflow_with_non_simd_consumers_accepting_list_of_batch_selector( {"type": "WorkflowImage", "name": "image_3"}, ], "steps": [ - { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (50, 50, 3) - }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, { "type": "NonSIMDConsumerAcceptingList", "name": "image_consumer", @@ -1339,16 +1349,24 @@ def test_workflow_with_non_simd_consumers_accepting_list_of_batch_and_scalar_sel ) # when - results = execution_engine.run(runtime_parameters={ - "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], - "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], - }) + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) # then assert len(results) == 2 - assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (50, 50, 3)] + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (50, 50, 3), + ] assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 3)] - assert [i.numpy_image.shape for i in results[1]["x"]] == [(120, 120, 3), (50, 50, 3)] + assert [i.numpy_image.shape for i in results[1]["x"]] == [ + (120, 120, 3), + (50, 50, 3), + ] assert [i.numpy_image.shape for i in results[1]["y"]] == [(320, 320, 3)] @@ -1359,18 +1377,14 @@ def test_workflow_with_non_simd_consumers_accepting_list_of_batch_and_scalar_sel {"type": "WorkflowImage", "name": "image_3"}, ], "steps": [ - { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (50, 50, 3) - }, + {"type": "ImageProducer", "name": 
"image_producer_x", "shape": (50, 50, 3)}, { "type": "NonSIMDConsumerAcceptingDict", "name": "image_consumer", "x": { "a": "$inputs.image_1", "b": "$steps.image_producer_x.image", - "c": "$inputs.image_3" + "c": "$inputs.image_3", }, }, ], @@ -1405,15 +1419,25 @@ def test_workflow_with_non_simd_consumers_accepting_dict_of_batch_and_scalar_sel ) # when - results = execution_engine.run(runtime_parameters={ - "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], - "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], - }) + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) # then assert len(results) == 2 - assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] - assert [i.numpy_image.shape for i in results[1]["x"]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { @@ -1429,11 +1453,7 @@ def test_workflow_with_non_simd_consumers_accepting_dict_of_batch_and_scalar_sel "x": "$inputs.image_1", "y": "$inputs.image_3", }, - { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (50, 50, 3) - }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, { "type": "NonSIMDConsumerAcceptingDict", "name": "image_consumer", @@ -1475,17 +1495,35 @@ def test_workflow_with_non_simd_consumers_accepting_dict_of_batch_and_scalar_sel ) # when - results = execution_engine.run(runtime_parameters={ - "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], - "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], - }) + results = 
execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) # then assert len(results) == 2 - assert [i.numpy_image.shape for i in results[0]["x"][0]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] - assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] - assert [i.numpy_image.shape for i in results[1]["x"][0]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] - assert [i.numpy_image.shape for i in results[1]["x"][1]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][0]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][1]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] WORKFLOW_WITH_NON_SIMD_CUSTOMER_INCREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { @@ -1501,11 +1539,7 @@ def test_workflow_with_non_simd_consumers_accepting_dict_of_batch_and_scalar_sel "x": "$inputs.image_1", "y": "$inputs.image_3", }, - { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (50, 50, 3) - }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, { "type": "NonSIMDConsumerAcceptingDictIncDim", "name": "image_consumer", @@ -1547,21 +1581,55 @@ def test_workflow_with_non_simd_consumer_inc_dim_accepting_dict_of_batch_and_sca ) # when - results = execution_engine.run(runtime_parameters={ - "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], - "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], - }) + results = execution_engine.run( + runtime_parameters={ + "image_1": 
[np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) # then assert len(results) == 2 - assert [i.numpy_image.shape for i in results[0]["x"][0][0]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] - assert [i.numpy_image.shape for i in results[0]["x"][0][1]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] - assert [i.numpy_image.shape for i in results[0]["x"][1][0]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] - assert [i.numpy_image.shape for i in results[0]["x"][1][1]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] - assert [i.numpy_image.shape for i in results[1]["x"][0][0]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] - assert [i.numpy_image.shape for i in results[1]["x"][0][1]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] - assert [i.numpy_image.shape for i in results[1]["x"][1][0]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] - assert [i.numpy_image.shape for i in results[1]["x"][1][1]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][0][0]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][0][1]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][1][0]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][1][1]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][0][0]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][0][1]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][1][0]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][1][1]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] 
WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { @@ -1577,11 +1645,7 @@ def test_workflow_with_non_simd_consumer_inc_dim_accepting_dict_of_batch_and_sca "x": "$inputs.image_1", "y": "$inputs.image_3", }, - { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (50, 50, 3) - }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, { "type": "NonSIMDConsumerAcceptingDictDecDim", "name": "image_consumer", @@ -1623,19 +1687,39 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_sca ) # when - results = execution_engine.run(runtime_parameters={ - "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], - "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], - }) + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) # then assert len(results) == 2 - assert [i.numpy_image.shape for i in results[0]["x"][0]] == [(100, 100, 3), (100, 100, 3)] - assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(50, 50, 3), (50, 50, 3)] - assert [i.numpy_image.shape for i in results[0]["x"][2]] == [(300, 300, 3), (300, 300, 3)] - assert [i.numpy_image.shape for i in results[1]["x"][0]] == [(120, 120, 3), (120, 120, 3)] - assert [i.numpy_image.shape for i in results[1]["x"][1]] == [(50, 50, 3), (50, 50, 3)] - assert [i.numpy_image.shape for i in results[1]["x"][2]] == [(320, 320, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [ + (100, 100, 3), + (100, 100, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [ + (50, 50, 3), + (50, 50, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][2]] == [ + (300, 300, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][0]] == [ + (120, 120, 3), + (120, 120, 3), + ] + 
assert [i.numpy_image.shape for i in results[1]["x"][1]] == [ + (50, 50, 3), + (50, 50, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][2]] == [ + (320, 320, 3), + (320, 320, 3), + ] WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1 = { @@ -1645,11 +1729,7 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_sca {"type": "WorkflowImage", "name": "image_3"}, ], "steps": [ - { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (50, 50, 3) - }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, { "type": "NonSIMDConsumerAcceptingDictDecDim", "name": "image_consumer", @@ -1691,32 +1771,35 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_sca ) # when - results = execution_engine.run(runtime_parameters={ - "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], - "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], - }) + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) # then assert len(results) == 1 - assert [i.numpy_image.shape for i in results[0]["x"][0]] == [(100, 100, 3), (120, 120, 3)] - assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(50, 50, 3), (50, 50, 3)] - assert [i.numpy_image.shape for i in results[0]["x"][2]] == [(300, 300, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [ + (100, 100, 3), + (120, 120, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [ + (50, 50, 3), + (50, 50, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][2]] == [ + (300, 300, 3), + (320, 320, 3), + ] WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_SCALARS = { "version": "1.1", "inputs": [], "steps": [ - { - "type": "ImageProducer", - "name": 
"image_producer_x", - "shape": (50, 50, 3) - }, - { - "type": "ImageProducer", - "name": "image_producer_y", - "shape": (60, 60, 3) - }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, { "type": "NonSIMDConsumerAcceptingDictDecDim", "name": "image_consumer", @@ -1778,11 +1861,7 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_sca "x": "$inputs.image_1", "y": "$inputs.image_3", }, - { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (50, 50, 3) - }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, { "type": "NonSIMDConsumerAcceptingListDecDim", "name": "image_consumer", @@ -1791,7 +1870,7 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_sca "$steps.image_producer_x.image", "$steps.dimensionality_boost.y", ], - "y": "some-value" + "y": "some-value", }, ], "outputs": [ @@ -1825,15 +1904,31 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_sca ) # when - results = execution_engine.run(runtime_parameters={ - "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], - "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], - }) + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) # then assert len(results) == 2 - assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (100, 100, 3), (50, 50, 3), (50, 50, 3), (300, 300, 3), (300, 300, 3)] - assert [i.numpy_image.shape for i in results[1]["x"]] == [(120, 120, 3), (120, 120, 3), (50, 50, 3), (50, 50, 3), (320, 320, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (100, 100, 3), + (50, 50, 3), + (50, 50, 3), + (300, 300, 3), + (300, 300, 3), + ] + assert 
[i.numpy_image.shape for i in results[1]["x"]] == [ + (120, 120, 3), + (120, 120, 3), + (50, 50, 3), + (50, 50, 3), + (320, 320, 3), + (320, 320, 3), + ] WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1 = { @@ -1843,16 +1938,16 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_sca {"type": "WorkflowImage", "name": "image_3"}, ], "steps": [ - { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (50, 50, 3) - }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, { "type": "NonSIMDConsumerAcceptingListDecDim", "name": "image_consumer", - "x": ["$inputs.image_1", "$steps.image_producer_x.image", "$inputs.image_3"], - "y": "some-value" + "x": [ + "$inputs.image_1", + "$steps.image_producer_x.image", + "$inputs.image_3", + ], + "y": "some-value", }, ], "outputs": [ @@ -1886,35 +1981,36 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_sca ) # when - results = execution_engine.run(runtime_parameters={ - "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], - "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], - }) + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) # then assert len(results) == 1 - assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (120, 120, 3), (50, 50, 3), (50, 50, 3), (300, 300, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (120, 120, 3), + (50, 50, 3), + (50, 50, 3), + (300, 300, 3), + (320, 320, 3), + ] WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_SCALARS = { "version": "1.1", "inputs": [], "steps": [ - { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (50, 50, 3) - }, - { - "type": "ImageProducer", - 
"name": "image_producer_y", - "shape": (60, 60, 3) - }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, { "type": "NonSIMDConsumerAcceptingListDecDim", "name": "image_consumer", "x": ["$steps.image_producer_x.image", "$steps.image_producer_y.image"], - "y": "some-value" + "y": "some-value", }, ], "outputs": [ @@ -1959,11 +2055,7 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_sca "version": "1.1", "inputs": [], "steps": [ - { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (100, 100, 3) - }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (100, 100, 3)}, { "type": "ImageProducer", "name": "image_producer_y", @@ -1983,8 +2075,8 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_sca { "type": "BoostDimensionality", "name": "dimensionality_boost", - "x": "$steps.image_consumer.x" - } + "x": "$steps.image_consumer.x", + }, ], "outputs": [ { @@ -2026,9 +2118,15 @@ def test_workflow_with_simd_consumers_accepting_list_of_scalar_selector( # then assert len(results) == 2, "Expected dim increase to happen" - assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (200, 200, 3)] + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (200, 200, 3), + ] assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 3)] - assert [i.numpy_image.shape for i in results[1]["x"]] == [(100, 100, 3), (200, 200, 3)] + assert [i.numpy_image.shape for i in results[1]["x"]] == [ + (100, 100, 3), + (200, 200, 3), + ] assert [i.numpy_image.shape for i in results[1]["y"]] == [(300, 300, 3)] @@ -2083,17 +2181,25 @@ def test_workflow_with_simd_consumers_accepting_list_of_batch_selector( ) # when - results = execution_engine.run(runtime_parameters={ - "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], - "image_2": [np.zeros((200, 
200, 3)), np.zeros((220, 220, 3))], - "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], - }) + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_2": [np.zeros((200, 200, 3)), np.zeros((220, 220, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) # then assert len(results) == 2 - assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (200, 200, 3)] + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (200, 200, 3), + ] assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 3)] - assert [i.numpy_image.shape for i in results[1]["x"]] == [(120, 120, 3), (220, 220, 3)] + assert [i.numpy_image.shape for i in results[1]["x"]] == [ + (120, 120, 3), + (220, 220, 3), + ] assert [i.numpy_image.shape for i in results[1]["y"]] == [(320, 320, 3)] @@ -2104,11 +2210,7 @@ def test_workflow_with_simd_consumers_accepting_list_of_batch_selector( {"type": "WorkflowImage", "name": "image_3"}, ], "steps": [ - { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (50, 50, 3) - }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, { "type": "SIMDConsumerAcceptingList", "name": "image_consumer", @@ -2152,16 +2254,24 @@ def test_workflow_with_simd_consumers_accepting_list_of_batch_and_scalar_selecto ) # when - results = execution_engine.run(runtime_parameters={ - "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], - "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], - }) + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) # then assert len(results) == 2 - assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (50, 50, 3)] + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 
100, 3), + (50, 50, 3), + ] assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 3)] - assert [i.numpy_image.shape for i in results[1]["x"]] == [(120, 120, 3), (50, 50, 3)] + assert [i.numpy_image.shape for i in results[1]["x"]] == [ + (120, 120, 3), + (50, 50, 3), + ] assert [i.numpy_image.shape for i in results[1]["y"]] == [(320, 320, 3)] @@ -2172,18 +2282,14 @@ def test_workflow_with_simd_consumers_accepting_list_of_batch_and_scalar_selecto {"type": "WorkflowImage", "name": "image_3"}, ], "steps": [ - { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (50, 50, 3) - }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, { "type": "SIMDConsumerAcceptingDict", "name": "image_consumer", "x": { "a": "$inputs.image_1", "b": "$steps.image_producer_x.image", - "c": "$inputs.image_3" + "c": "$inputs.image_3", }, }, ], @@ -2218,15 +2324,25 @@ def test_workflow_with_simd_consumers_accepting_dict_of_batch_and_scalar_selecto ) # when - results = execution_engine.run(runtime_parameters={ - "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], - "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], - }) + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) # then assert len(results) == 2 - assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] - assert [i.numpy_image.shape for i in results[1]["x"]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { @@ -2242,11 +2358,7 @@ def 
test_workflow_with_simd_consumers_accepting_dict_of_batch_and_scalar_selecto "x": "$inputs.image_1", "y": "$inputs.image_3", }, - { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (50, 50, 3) - }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, { "type": "SIMDConsumerAcceptingDict", "name": "image_consumer", @@ -2288,17 +2400,35 @@ def test_workflow_with_simd_consumers_accepting_dict_of_batch_and_scalar_selecto ) # when - results = execution_engine.run(runtime_parameters={ - "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], - "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], - }) + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) # then assert len(results) == 2 - assert [i.numpy_image.shape for i in results[0]["x"][0]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] - assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] - assert [i.numpy_image.shape for i in results[1]["x"][0]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] - assert [i.numpy_image.shape for i in results[1]["x"][1]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][0]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][1]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] ######### ============ @@ -2317,11 +2447,7 @@ def test_workflow_with_simd_consumers_accepting_dict_of_batch_and_scalar_selecto "x": "$inputs.image_1", "y": "$inputs.image_3", }, - { - "type": "ImageProducer", - 
"name": "image_producer_x", - "shape": (50, 50, 3) - }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, { "type": "SIMDConsumerAcceptingDictIncDim", "name": "image_consumer", @@ -2363,21 +2489,55 @@ def test_workflow_with_simd_consumer_inc_dim_accepting_dict_of_batch_and_scalar_ ) # when - results = execution_engine.run(runtime_parameters={ - "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], - "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], - }) + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) # then assert len(results) == 2 - assert [i.numpy_image.shape for i in results[0]["x"][0][0]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] - assert [i.numpy_image.shape for i in results[0]["x"][0][1]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] - assert [i.numpy_image.shape for i in results[0]["x"][1][0]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] - assert [i.numpy_image.shape for i in results[0]["x"][1][1]] == [(100, 100, 3), (50, 50, 3), (300, 300, 3)] - assert [i.numpy_image.shape for i in results[1]["x"][0][0]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] - assert [i.numpy_image.shape for i in results[1]["x"][0][1]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] - assert [i.numpy_image.shape for i in results[1]["x"][1][0]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] - assert [i.numpy_image.shape for i in results[1]["x"][1][1]] == [(120, 120, 3), (50, 50, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][0][0]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][0][1]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][1][0]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape 
for i in results[0]["x"][1][1]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][0][0]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][0][1]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][1][0]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][1][1]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { @@ -2393,11 +2553,7 @@ def test_workflow_with_simd_consumer_inc_dim_accepting_dict_of_batch_and_scalar_ "x": "$inputs.image_1", "y": "$inputs.image_3", }, - { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (50, 50, 3) - }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, { "type": "SIMDConsumerAcceptingDictDecDim", "name": "image_consumer", @@ -2439,36 +2595,51 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_ ) # when - results = execution_engine.run(runtime_parameters={ - "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], - "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], - }) + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) # then - print(results) assert len(results) == 2 - assert [i.numpy_image.shape for i in results[0]["x"][0]] == [(100, 100, 3), (100, 100, 3)] - assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(50, 50, 3), (50, 50, 3)] - assert [i.numpy_image.shape for i in results[0]["x"][2]] == [(300, 300, 3), (300, 300, 3)] - assert [i.numpy_image.shape for i in results[1]["x"][0]] == [(120, 120, 3), (120, 120, 3)] - assert 
[i.numpy_image.shape for i in results[1]["x"][1]] == [(50, 50, 3), (50, 50, 3)] - assert [i.numpy_image.shape for i in results[1]["x"][2]] == [(320, 320, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [ + (100, 100, 3), + (100, 100, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [ + (50, 50, 3), + (50, 50, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][2]] == [ + (300, 300, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][0]] == [ + (120, 120, 3), + (120, 120, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][1]] == [ + (50, 50, 3), + (50, 50, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][2]] == [ + (320, 320, 3), + (320, 320, 3), + ] -WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1 = { +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1 = { "version": "1.1", "inputs": [ {"type": "WorkflowImage", "name": "image_1"}, {"type": "WorkflowImage", "name": "image_3"}, ], "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (50, 50, 3) - }, - { - "type": "NonSIMDConsumerAcceptingDictDecDim", + "type": "SIMDConsumerAcceptingDictDecDim", "name": "image_consumer", "x": { "a": "$inputs.image_1", @@ -2488,7 +2659,7 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_ @mock.patch.object(blocks_loader, "get_plugin_modules") -def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_1( +def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_1( get_plugin_modules_mock: MagicMock, model_manager: ModelManager, ) -> None: @@ -2502,40 +2673,43 @@ def 
test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_sca "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } execution_engine = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1, + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1, init_parameters=workflow_init_parameters, max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, ) # when - results = execution_engine.run(runtime_parameters={ - "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], - "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], - }) + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) # then assert len(results) == 1 - assert [i.numpy_image.shape for i in results[0]["x"][0]] == [(100, 100, 3), (120, 120, 3)] - assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(50, 50, 3), (50, 50, 3)] - assert [i.numpy_image.shape for i in results[0]["x"][2]] == [(300, 300, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [ + (100, 100, 3), + (120, 120, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [ + (50, 50, 3), + (50, 50, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][2]] == [ + (300, 300, 3), + (320, 320, 3), + ] -WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_SCALARS = { +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_SCALARS = { "version": "1.1", "inputs": [], "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (50, 50, 3) - 
}, - { - "type": "ImageProducer", - "name": "image_producer_y", - "shape": (60, 60, 3) - }, - { - "type": "NonSIMDConsumerAcceptingDictDecDim", + "type": "SIMDConsumerAcceptingDictDecDim", "name": "image_consumer", "x": { "a": "$steps.image_producer_x.image", @@ -2554,7 +2728,7 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_sca @mock.patch.object(blocks_loader, "get_plugin_modules") -def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selectors( +def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selectors( get_plugin_modules_mock: MagicMock, model_manager: ModelManager, ) -> None: @@ -2568,7 +2742,7 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_sca "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } execution_engine = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_SCALARS, + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_SCALARS, init_parameters=workflow_init_parameters, max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, ) @@ -2582,7 +2756,7 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_sca assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(60, 60, 3)] -WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { "version": "1.1", "inputs": [ {"type": "WorkflowImage", "name": "image_1"}, @@ -2595,20 +2769,16 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_sca "x": "$inputs.image_1", "y": "$inputs.image_3", }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (50, 50, 3) - }, - 
{ - "type": "NonSIMDConsumerAcceptingListDecDim", + "type": "SIMDConsumerAcceptingListDecDim", "name": "image_consumer", "x": [ "$steps.dimensionality_boost.x", "$steps.image_producer_x.image", "$steps.dimensionality_boost.y", ], - "y": "some-value" + "y": "some-value", }, ], "outputs": [ @@ -2622,7 +2792,7 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_sca @mock.patch.object(blocks_loader, "get_plugin_modules") -def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_2( +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_2( get_plugin_modules_mock: MagicMock, model_manager: ModelManager, ) -> None: @@ -2636,40 +2806,56 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_sca "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } execution_engine = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, init_parameters=workflow_init_parameters, max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, ) # when - results = execution_engine.run(runtime_parameters={ - "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], - "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], - }) + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) # then assert len(results) == 2 - assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (100, 100, 3), (50, 50, 3), (50, 50, 3), (300, 300, 3), (300, 300, 3)] - assert [i.numpy_image.shape for i in results[1]["x"]] == [(120, 120, 3), (120, 120, 3), (50, 50, 3), (50, 50, 
3), (320, 320, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (100, 100, 3), + (50, 50, 3), + (50, 50, 3), + (300, 300, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"]] == [ + (120, 120, 3), + (120, 120, 3), + (50, 50, 3), + (50, 50, 3), + (320, 320, 3), + (320, 320, 3), + ] -WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1 = { +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1 = { "version": "1.1", "inputs": [ {"type": "WorkflowImage", "name": "image_1"}, {"type": "WorkflowImage", "name": "image_3"}, ], "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (50, 50, 3) - }, - { - "type": "NonSIMDConsumerAcceptingListDecDim", + "type": "SIMDConsumerAcceptingListDecDim", "name": "image_consumer", - "x": ["$inputs.image_1", "$steps.image_producer_x.image", "$inputs.image_3"], - "y": "some-value" + "x": [ + "$inputs.image_1", + "$steps.image_producer_x.image", + "$inputs.image_3", + ], + "y": "some-value", }, ], "outputs": [ @@ -2683,7 +2869,7 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_sca @mock.patch.object(blocks_loader, "get_plugin_modules") -def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_1( +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_1( get_plugin_modules_mock: MagicMock, model_manager: ModelManager, ) -> None: @@ -2697,41 +2883,42 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_sca "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } execution_engine = ExecutionEngine.init( - 
workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1, + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1, init_parameters=workflow_init_parameters, max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, ) # when - results = execution_engine.run(runtime_parameters={ - "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], - "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], - }) + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) # then assert len(results) == 1 - assert [i.numpy_image.shape for i in results[0]["x"]] == [(100, 100, 3), (120, 120, 3), (50, 50, 3), (50, 50, 3), (300, 300, 3), (320, 320, 3)] + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (120, 120, 3), + (50, 50, 3), + (50, 50, 3), + (300, 300, 3), + (320, 320, 3), + ] -WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_SCALARS = { +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_SCALARS = { "version": "1.1", "inputs": [], "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, { - "type": "ImageProducer", - "name": "image_producer_x", - "shape": (50, 50, 3) - }, - { - "type": "ImageProducer", - "name": "image_producer_y", - "shape": (60, 60, 3) - }, - { - "type": "NonSIMDConsumerAcceptingListDecDim", + "type": "SIMDConsumerAcceptingListDecDim", "name": "image_consumer", "x": ["$steps.image_producer_x.image", "$steps.image_producer_y.image"], - "y": "some-value" + "y": "some-value", }, ], "outputs": [ @@ -2745,7 +2932,7 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_sca 
@mock.patch.object(blocks_loader, "get_plugin_modules") -def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selectors( +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selectors( get_plugin_modules_mock: MagicMock, model_manager: ModelManager, ) -> None: @@ -2759,7 +2946,7 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_sca "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } execution_engine = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_SCALARS, + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_SCALARS, init_parameters=workflow_init_parameters, max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, ) @@ -2770,4 +2957,3 @@ def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_sca # then assert len(results) == 1 assert [i.numpy_image.shape for i in results[0]["x"]] == [(50, 50, 3), (60, 60, 3)] - diff --git a/tests/workflows/unit_tests/core_steps/transformations/test_qr_code_generator.py b/tests/workflows/unit_tests/core_steps/transformations/test_qr_code_generator.py index b2ba19cd09..bdd79fe4f6 100644 --- a/tests/workflows/unit_tests/core_steps/transformations/test_qr_code_generator.py +++ b/tests/workflows/unit_tests/core_steps/transformations/test_qr_code_generator.py @@ -1,5 +1,5 @@ -import pytest import numpy as np +import pytest from inference.core.workflows.core_steps.transformations.qr_code_generator.v1 import ( QRCodeGeneratorBlockV1, @@ -12,44 +12,44 @@ class TestQRCodeGeneratorBlockV1: def test_qr_code_generator_block_manifest(self): # given block = QRCodeGeneratorBlockV1() - + # when manifest_class = block.get_manifest() outputs = manifest_class.describe_outputs() - + # then assert outputs[0].name == "qr_code" - assert hasattr(manifest_class, '__fields__') - assert 'type' in manifest_class.__fields__ - + 
assert hasattr(manifest_class, "__fields__") + assert "type" in manifest_class.__fields__ + def test_qr_code_generator_run_basic(self): # given block = QRCodeGeneratorBlockV1() text = "https://roboflow.com" - + # when result = block.run(text=text) - + # then assert "qr_code" in result assert isinstance(result["qr_code"], WorkflowImageData) assert result["qr_code"].numpy_image.shape[2] == 3 # RGB channels assert result["qr_code"].numpy_image.dtype == np.uint8 - + def test_qr_code_generator_run_with_parameters(self): # given block = QRCodeGeneratorBlockV1() text = "Test" - + # when - version and box_size are now hardcoded per spec result = block.run( text=text, error_correct="High (~30% word recovery / lowest data capacity)", border=2, fill_color="blue", - back_color="yellow" + back_color="yellow", ) - + # then assert "qr_code" in result assert isinstance(result["qr_code"], WorkflowImageData) @@ -60,136 +60,152 @@ class TestGenerateQRCode: def test_generate_qr_code_basic(self): # given text = "https://example.com" - + # when result = generate_qr_code(text=text) - + # then assert isinstance(result, WorkflowImageData) assert result.numpy_image.shape[2] == 3 # RGB channels assert result.numpy_image.dtype == np.uint8 assert result.numpy_image.shape[0] > 0 # Has height assert result.numpy_image.shape[1] > 0 # Has width - + def test_generate_qr_code_with_hardcoded_defaults(self): # given text = "Test" - + # when - version and box_size are now hardcoded result = generate_qr_code(text=text, version=1, box_size=10) - + # then assert isinstance(result, WorkflowImageData) # Version 1 QR code with box_size=10 and border=4 should be (21+8)*10 = 290 pixels expected_size = (21 + 2 * 4) * 10 # 290 assert result.numpy_image.shape[0] == expected_size assert result.numpy_image.shape[1] == expected_size - + def test_generate_qr_code_auto_version(self): # given text = "Test with auto version" - + # when result = generate_qr_code(text=text, version=None) - + # then assert 
isinstance(result, WorkflowImageData) assert result.numpy_image.shape[0] > 0 assert result.numpy_image.shape[1] > 0 - + def test_generate_qr_code_error_correction_levels(self): # given text = "Error correction test" - + # when/then - should not raise errors for valid display name levels for level in [ "Low (~7% word recovery / highest data capacity)", "Medium (~15% word recovery)", "Quartile (~25% word recovery)", - "High (~30% word recovery / lowest data capacity)" + "High (~30% word recovery / lowest data capacity)", ]: result = generate_qr_code(text=text, error_correct=level) assert isinstance(result, WorkflowImageData) - + def test_generate_qr_code_invalid_error_correction(self): # given text = "Test" - + # when result = generate_qr_code(text=text, error_correct="INVALID") - + # then - should default to ERROR_CORRECT_M assert isinstance(result, WorkflowImageData) - + def test_generate_qr_code_color_parsing(self): # given text = "Color test" - + # when/then - should handle various color formats # Test with standard color names (case-insensitive) result1 = generate_qr_code(text=text, fill_color="black", back_color="white") assert isinstance(result1, WorkflowImageData) - + # Test with uppercase standard names (matches supervision constants) result2 = generate_qr_code(text=text, fill_color="BLACK", back_color="WHITE") assert isinstance(result2, WorkflowImageData) - + # Test with hex colors - result3 = generate_qr_code(text=text, fill_color="#FF0000", back_color="#00FF00") + result3 = generate_qr_code( + text=text, fill_color="#FF0000", back_color="#00FF00" + ) assert isinstance(result3, WorkflowImageData) - + # Test with rgb format - result4 = generate_qr_code(text=text, fill_color="rgb(255, 0, 0)", back_color="rgb(0, 255, 0)") + result4 = generate_qr_code( + text=text, fill_color="rgb(255, 0, 0)", back_color="rgb(0, 255, 0)" + ) assert isinstance(result4, WorkflowImageData) - + # Test with CSS3 color names (fallback) - result5 = generate_qr_code(text=text, 
fill_color="mediumpurple", back_color="lightblue") + result5 = generate_qr_code( + text=text, fill_color="mediumpurple", back_color="lightblue" + ) assert isinstance(result5, WorkflowImageData) - + def test_generate_qr_code_supervision_color_compatibility(self): """Test that all supervision standard colors work with QR code generation.""" # given text = "Supervision color test" - + # Test all standard supervision colors - standard_colors = ["BLACK", "WHITE", "RED", "GREEN", "BLUE", "YELLOW", "ROBOFLOW"] - + standard_colors = [ + "BLACK", + "WHITE", + "RED", + "GREEN", + "BLUE", + "YELLOW", + "ROBOFLOW", + ] + for color_name in standard_colors: # when - using supervision standard color names - result = generate_qr_code(text=text, fill_color=color_name, back_color="WHITE") - + result = generate_qr_code( + text=text, fill_color=color_name, back_color="WHITE" + ) + # then - should successfully generate QR code assert isinstance(result, WorkflowImageData) assert result.numpy_image is not None assert result.numpy_image.shape[2] == 3 # RGB image - + # Test mixed formats to ensure conversions work result_mixed = generate_qr_code( - text=text, + text=text, fill_color="ROBOFLOW", # supervision constant - back_color="#FFFFFF" # hex format + back_color="#FFFFFF", # hex format ) assert isinstance(result_mixed, WorkflowImageData) - + def test_generate_qr_code_box_size_and_border(self): # given text = "Size test" - + # when - testing with different parameters (function still accepts them) result_small = generate_qr_code(text=text, version=1, box_size=5, border=2) result_large = generate_qr_code(text=text, version=1, box_size=15, border=6) - + # then assert result_small.numpy_image.shape[0] < result_large.numpy_image.shape[0] assert result_small.numpy_image.shape[1] < result_large.numpy_image.shape[1] - + def test_generate_qr_code_empty_text(self): # given text = "" - + # when result = generate_qr_code(text=text) - + # then assert isinstance(result, WorkflowImageData) assert 
result.numpy_image.shape[0] > 0 @@ -198,19 +214,19 @@ def test_generate_qr_code_empty_text(self): @pytest.mark.skipif( True, # Skip until qrcode dependency is resolved in CI - reason="qrcode library may not be available in test environment" + reason="qrcode library may not be available in test environment", ) class TestQRCodeGeneratorIntegration: def test_qr_code_format_is_png_compatible(self): # given text = "https://roboflow.com" - + # when result = generate_qr_code(text=text) - + # then # Verify the image can be used by other workflow blocks assert isinstance(result, WorkflowImageData) assert result.numpy_image.dtype == np.uint8 assert len(result.numpy_image.shape) == 3 - assert result.numpy_image.shape[2] == 3 # RGB format expected by IconVisualizer \ No newline at end of file + assert result.numpy_image.shape[2] == 3 # RGB format expected by IconVisualizer From c6a7ab5b4940cc96756c6585c75bdabef1e28b8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 22 Aug 2025 17:04:45 +0200 Subject: [PATCH 09/20] Finish tests and adjustments for conditional execution --- inference/core/workflows/errors.py | 2 +- .../v1/compiler/syntactic_parser.py | 1 - .../execution_data_manager/manager.py | 25 +- .../step_input_assembler.py | 72 +- .../plugin_image_producer/__init__.py | 91 +- ...ng_scalars_to_fit_into_batch_parameters.py | 870 +++++++++++++++++- 6 files changed, 1011 insertions(+), 50 deletions(-) diff --git a/inference/core/workflows/errors.py b/inference/core/workflows/errors.py index ff5c9d3da5..f420312849 100644 --- a/inference/core/workflows/errors.py +++ b/inference/core/workflows/errors.py @@ -4,7 +4,7 @@ class WorkflowBlockError(BaseModel): - block_id: str + block_id: Optional[str] = None block_type: Optional[str] = None block_details: Optional[str] = None property_name: Optional[str] = None diff --git a/inference/core/workflows/execution_engine/v1/compiler/syntactic_parser.py 
b/inference/core/workflows/execution_engine/v1/compiler/syntactic_parser.py index 9cbac90cf5..15986b2392 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/syntactic_parser.py +++ b/inference/core/workflows/execution_engine/v1/compiler/syntactic_parser.py @@ -77,7 +77,6 @@ def parse_workflow_definition( property_name = None if len(loc) > 3 and loc[2] == element_type: property_name = str(loc[3]) - block_error = WorkflowBlockError( block_id=element_name, block_type=element_type, diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py index 489cc6ca3d..90b2b331b6 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py @@ -180,6 +180,7 @@ def register_non_simd_step_output( context="workflow_execution | step_output_registration", ) if isinstance(output, FlowControl): + print("FLOW", step_node.name, output) self._register_flow_control_output_for_non_simd_step( step_node=step_node, output=output, @@ -273,6 +274,10 @@ def register_simd_step_output( print("outputs", outputs) # SIMD step collapsing into scalar (can happen for auto-batch casting of parameters) if isinstance(outputs, list): + if len(outputs) == 0: + print("TERMINATING", step_selector) + # termination of the computation as in NON-SIMD case + return None if len(outputs) != 1: raise ExecutionEngineRuntimeError( public_message=f"Error in execution engine. In context of SIMD step: {step_selector} attempts to " @@ -483,26 +488,6 @@ def get_batch_data( context="workflow_execution | step_output_registration", ) - # def should_simd_step_output_be_casted_to_scalar(self, step_selector: str) -> bool: - # if not self.is_step_simd(step_selector=step_selector): - # raise AssumptionError( - # public_message=f"Error in execution engine. 
Attempted to verify SIMD step output auto-casting to " - # f"scalar for step {step_selector} which is not registered as SIMD step. " - # f"This is most likely bug. Contact Roboflow team through github issues " - # f"(https://github.com/roboflow/inference/issues) providing full context of" - # f"the problem - including workflow definition you use.", - # context="workflow_execution | step_output_registration", - # ) - # step_node_data = node_as( - # execution_graph=self._execution_graph, - # node=step_selector, - # expected_type=StepNode, - # ) - # if not step_node_data.scalar_parameters_to_be_batched: - # return False - # a = step_node_data.output_dimensionality - step_node_data.step_execution_dimensionality - # return len(step_node_data.batch_oriented_parameters) == 0 - def is_step_simd(self, step_selector: str) -> bool: step_node_data = node_as( execution_graph=self._execution_graph, diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py index 457ac4df7c..d6cb3588dd 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py @@ -246,7 +246,7 @@ def construct_simd_step_input( dynamic_batches_manager: DynamicBatchesManager, branching_manager: BranchingManager, ) -> BatchModeSIMDStepInput: - masks = construct_mask_for_all_inputs_dimensionalities( + masks, scalars_discarded = construct_mask_for_all_inputs_dimensionalities( step_node=step_node, branching_manager=branching_manager, ) @@ -254,6 +254,7 @@ def construct_simd_step_input( step_node=step_node, dynamic_batches_manager=dynamic_batches_manager, masks=masks, + scalars_discarded=scalars_discarded, runtime_parameters=runtime_parameters, execution_cache=execution_cache, ) @@ -262,33 +263,41 @@ def 
construct_simd_step_input( def construct_mask_for_all_inputs_dimensionalities( step_node: StepNode, branching_manager: BranchingManager, -) -> Any: +) -> Tuple[Any, bool]: + print(f"Collecting masks for: {step_node.name}") inputs_dimensionalities = collect_inputs_dimensionalities(step_node=step_node) all_dimensionalities = {dim for dim in inputs_dimensionalities.values() if dim > 0} + print("all_dimensionalities", all_dimensionalities) batch_masks, non_batch_masks = [], set() + print(f"Execution branches impacting inputs: {step_node.execution_branches_impacting_inputs}") for execution_branch in step_node.execution_branches_impacting_inputs: if not branching_manager.is_execution_branch_registered( execution_branch=execution_branch ): + print(f"EXECUTION BRANCH: {execution_branch} not registered") non_batch_masks.add(False) continue if branching_manager.is_execution_branch_batch_oriented( execution_branch=execution_branch ): mask = branching_manager.get_mask(execution_branch=execution_branch) + print(f"EXECUTION BRANCH: {execution_branch} is batch oriented - mask: {mask}") batch_masks.append(mask) else: mask = branching_manager.get_mask(execution_branch=execution_branch) + print(f"EXECUTION BRANCH: {execution_branch} is not batch oriented - mask: {mask}") non_batch_masks.add(mask) - if False in non_batch_masks: - return {dimension: set() for dimension in all_dimensionalities} + scalar_mask_contains_false = False in non_batch_masks + if scalar_mask_contains_false: + print("CANCELLING OUT!") + return {dimension: set() for dimension in all_dimensionalities}, scalar_mask_contains_false return { dimension: get_masks_intersection_up_to_dimension( batch_masks=batch_masks, dimension=dimension, ) for dimension in all_dimensionalities - } + }, scalar_mask_contains_false def collect_inputs_dimensionalities( @@ -356,9 +365,11 @@ def prepare_parameters( step_node: StepNode, dynamic_batches_manager: DynamicBatchesManager, masks: Dict[int, Optional[Set[DynamicBatchIndex]]], + 
scalars_discarded: bool, runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, ) -> BatchModeSIMDStepInput: + print(f"PREPARING PARAMS FOR: {step_node.name} - masks: {masks}") print("DDD", step_node.auto_batch_casting_lineage_supports) step_requests_batch_input = step_node.step_manifest.accepts_batch_input() result = {} @@ -376,6 +387,7 @@ def prepare_parameters( parameter=parameter_specs, step_execution_dimensionality=step_node.step_execution_dimensionality, masks=masks, + scalars_discarded=scalars_discarded, dynamic_batches_manager=dynamic_batches_manager, runtime_parameters=runtime_parameters, execution_cache=execution_cache, @@ -393,6 +405,7 @@ def prepare_parameters( parameter=parameter_specs, step_execution_dimensionality=step_node.step_execution_dimensionality, masks=masks, + scalars_discarded=scalars_discarded, dynamic_batches_manager=dynamic_batches_manager, runtime_parameters=runtime_parameters, execution_cache=execution_cache, @@ -438,6 +451,7 @@ def get_compound_parameter_value( parameter: CompoundStepInputDefinition, step_execution_dimensionality: int, masks: Dict[int, Optional[Set[DynamicBatchIndex]]], + scalars_discarded: bool, dynamic_batches_manager: DynamicBatchesManager, runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, @@ -458,6 +472,7 @@ def get_compound_parameter_value( parameter=nested_element, step_execution_dimensionality=step_execution_dimensionality, masks=masks, + scalars_discarded=scalars_discarded, dynamic_batches_manager=dynamic_batches_manager, runtime_parameters=runtime_parameters, execution_cache=execution_cache, @@ -483,6 +498,7 @@ def get_compound_parameter_value( parameter=nested_element, step_execution_dimensionality=step_execution_dimensionality, masks=masks, + scalars_discarded=scalars_discarded, dynamic_batches_manager=dynamic_batches_manager, runtime_parameters=runtime_parameters, execution_cache=execution_cache, @@ -510,6 +526,7 @@ def get_non_compound_parameter_value( parameter: 
StepInputDefinition, step_execution_dimensionality: int, masks: Dict[int, Optional[Set[DynamicBatchIndex]]], + scalars_discarded: bool, dynamic_batches_manager: DynamicBatchesManager, runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, @@ -541,6 +558,8 @@ def get_non_compound_parameter_value( step_execution_dimensionality=step_execution_dimensionality, guard_of_indices_wrapping=guard_of_indices_wrapping, step_requests_batch_input=step_requests_batch_input, + masks=masks, + scalars_discarded=False, ) elif parameter.points_to_step_output(): input_parameter: DynamicStepInputDefinition = parameter # type: ignore @@ -561,24 +580,12 @@ def get_non_compound_parameter_value( step_execution_dimensionality=step_execution_dimensionality, guard_of_indices_wrapping=guard_of_indices_wrapping, step_requests_batch_input=step_requests_batch_input, + masks=masks, + scalars_discarded=scalars_discarded, ) else: static_input: StaticStepInputDefinition = parameter # type: ignore - if not requested_as_batch: - return static_input.value, None, False - else: - return apply_auto_batch_casting( - parameter_name=parameter.parameter_specification.parameter_name, - value=static_input.value, - auto_batch_casting_config=auto_batch_casting_lineage_supports[ - parameter.parameter_specification.parameter_name - ], - contains_empty_scalar_step_output_selector=False, - dynamic_batches_manager=dynamic_batches_manager, - step_execution_dimensionality=step_execution_dimensionality, - guard_of_indices_wrapping=guard_of_indices_wrapping, - step_requests_batch_input=step_requests_batch_input, - ) + return static_input.value, None, False dynamic_parameter: DynamicStepInputDefinition = parameter # type: ignore parameter_dimensionality = dynamic_parameter.get_dimensionality() lineage_indices = dynamic_batches_manager.get_indices_for_data_lineage( @@ -677,6 +684,8 @@ def apply_auto_batch_casting( step_execution_dimensionality: int, guard_of_indices_wrapping: GuardForIndicesWrapping, 
step_requests_batch_input: bool, + masks: Dict[int, Optional[Set[DynamicBatchIndex]]], + scalars_discarded: bool, ) -> Tuple[Any, List[DynamicBatchIndex], bool]: print( f"parameter_name: {parameter_name} - auto_batch_casting_config: {auto_batch_casting_config}" @@ -693,10 +702,25 @@ def apply_auto_batch_casting( if missing_dimensions > 0: padding = (0,) * missing_dimensions indices = [i + padding for i in indices] - batch_content = [value] * len(indices) + if scalars_discarded: + batch_content = [None] * len(indices) + elif auto_batch_casting_config.lineage_support is None: + batch_content = [value] * len(indices) + else: + support_dimensionality = len(auto_batch_casting_config.lineage_support) + mask_for_support_dimensionality = masks[support_dimensionality] + if mask_for_support_dimensionality is None: + batch_content = [value] * len(indices) + else: + batch_content = [] + for index in indices: + if index[:support_dimensionality] in mask_for_support_dimensionality: + batch_content.append(value) + else: + batch_content.append(None) created_batch = Batch(content=batch_content, indices=indices) if step_execution_dimensionality == auto_batch_casting_config.casted_dimensionality: - return created_batch, indices, contains_empty_scalar_step_output_selector + return created_batch, indices, contains_empty_scalar_step_output_selector or scalars_discarded if step_execution_dimensionality > auto_batch_casting_config.casted_dimensionality: raise ExecutionEngineRuntimeError( public_message=f"Detected a situation when parameter: " @@ -733,7 +757,7 @@ def apply_auto_batch_casting( ) if upper_level_lineage_dimensionality == 0 and not step_requests_batch_input: # for batch collapse into scalar - return created_batch, indices, contains_empty_scalar_step_output_selector + return created_batch, indices, contains_empty_scalar_step_output_selector or scalars_discarded if auto_batch_casting_config.lineage_support is None: upper_level_indices = [indices[0][:-1]] else: @@ -767,7 +791,7 @@ 
def apply_auto_batch_casting( data=batch_content, guard_of_indices_wrapping=guard_of_indices_wrapping, ) - return result, result.indices, contains_empty_scalar_step_output_selector + return result, result.indices, contains_empty_scalar_step_output_selector or scalars_discarded def _flatten_batch_oriented_inputs( diff --git a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py index 1349bf2d80..f131f75283 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py @@ -15,8 +15,9 @@ from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, STRING_KIND, - Selector, + Selector, StepSelector, ) +from inference.core.workflows.execution_engine.v1.entities import FlowControl from inference.core.workflows.prototypes.block import ( BlockResult, WorkflowBlock, @@ -856,6 +857,91 @@ def run(self, x: Dict[str, Batch[Batch[Any]]]) -> BlockResult: return results +class AlwaysTerminateManifest(WorkflowBlockManifest): + type: Literal["AlwaysTerminate"] + x: Union[Selector(), Any] + next_steps: List[StepSelector] = Field( + description="Steps to execute if the condition evaluates to true.", + examples=[["$steps.on_true"]], + ) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class AlwaysTerminateBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return AlwaysTerminateManifest + + def run(self, x: Any, next_steps: List[StepSelector]) -> BlockResult: + return FlowControl(mode="terminate_branch") + + +class AlwaysPassManifest(WorkflowBlockManifest): + type: Literal["AlwaysPass"] + x: Union[Selector(), Any] 
+ next_steps: List[StepSelector] = Field( + description="Steps to execute if the condition evaluates to true.", + examples=[["$steps.on_true"]], + ) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class AlwaysPassBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return AlwaysPassManifest + + def run(self, x: Any, next_steps: List[StepSelector]) -> BlockResult: + return FlowControl(mode="select_step", context=next_steps) + + +class EachSecondPassManifest(WorkflowBlockManifest): + type: Literal["EachSecondPass"] + x: Union[Selector(), Any] + next_steps: List[StepSelector] = Field( + description="Steps to execute if the condition evaluates to true.", + examples=[["$steps.on_true"]], + ) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class EachSecondPassBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return EachSecondPassManifest + + def __init__(self): + self._last_passed = False + + def run(self, x: Any, next_steps: List[StepSelector]) -> BlockResult: + if self._last_passed: + self._last_passed = False + return FlowControl(mode="terminate_branch") + self._last_passed = True + return FlowControl(mode="select_step", context=next_steps) + + def load_blocks() -> List[Type[WorkflowBlock]]: return [ ImageProducerBlock, @@ -882,4 +968,7 @@ def load_blocks() -> List[Type[WorkflowBlock]]: SIMDConsumerAcceptingDictIncDimBlock, SIMDConsumerAcceptingDictDecDimBlock, SIMDConsumerAcceptingListDecDimBlock, + AlwaysTerminateBlock, + AlwaysPassBlock, + EachSecondPassBlock, ] diff --git a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py 
b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py index 3376226ff0..0473dff6c8 100644 --- a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py +++ b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py @@ -2431,9 +2431,6 @@ def test_workflow_with_simd_consumers_accepting_dict_of_batch_and_scalar_selecto ] -######### ============ - - WORKFLOW_WITH_SIMD_CUSTOMER_INCREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { "version": "1.1", "inputs": [ @@ -2957,3 +2954,870 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ # then assert len(results) == 1 assert [i.numpy_image.shape for i in results[0]["x"]] == [(50, 50, 3), (60, 60, 3)] + + +TEST_WORKFLOW_WITH_FLOW_CONTROL_BLOCKING_SIMD_PRODUCER = { + "version": "1.1", + "inputs": [], + "steps": [ + {"type": "AlwaysTerminate", "name": "condition", "x": "dummy", "next_steps": ["$steps.image_producer_x"]}, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": ["$steps.image_producer_x.image", "$steps.image_producer_y.image"], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_always_blocking_simd_producer( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + 
"workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=TEST_WORKFLOW_WITH_FLOW_CONTROL_BLOCKING_SIMD_PRODUCER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert len(results) == 1 + assert results[0]["x"] is None + + +TEST_WORKFLOW_WITH_FLOW_CONTROL_PASSING_SIMD_PRODUCER = { + "version": "1.1", + "inputs": [], + "steps": [ + {"type": "AlwaysPass", "name": "condition", "x": "dummy", "next_steps": ["$steps.image_producer_x"]}, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": ["$steps.image_producer_x.image", "$steps.image_producer_y.image"], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_always_passing_simd_producer( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=TEST_WORKFLOW_WITH_FLOW_CONTROL_PASSING_SIMD_PRODUCER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"]] == 
[(50, 50, 3), (60, 60, 3)] + + +TEST_WORKFLOW_WITH_FLOW_CONTROL_BLOCKING_SIMD_PRODUCER_AFTER_PRODUCTION = { + "version": "1.1", + "inputs": [], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "AlwaysTerminate", "name": "condition", "x": "$steps.image_producer_x.image", "next_steps": ["$steps.image_consumer"]}, + {"type": "ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": ["$steps.image_producer_x.image", "$steps.image_producer_y.image"], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_always_blocking_simd_producer_after_production( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=TEST_WORKFLOW_WITH_FLOW_CONTROL_BLOCKING_SIMD_PRODUCER_AFTER_PRODUCTION, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert len(results) == 1 + assert results[0]["x"] is None + + +TEST_WORKFLOW_WITH_FLOW_CONTROL_PASSING_SIMD_PRODUCER_AFTER_PRODUCTION = { + "version": "1.1", + "inputs": [], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "AlwaysPass", "name": "condition", "x": "$steps.image_producer_x.image", "next_steps": ["$steps.image_consumer"]}, + {"type": 
"ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": ["$steps.image_producer_x.image", "$steps.image_producer_y.image"], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_always_passing_simd_producer_after_production( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=TEST_WORKFLOW_WITH_FLOW_CONTROL_PASSING_SIMD_PRODUCER_AFTER_PRODUCTION, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"]] == [(50, 50, 3), (60, 60, 3)] + + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1_AND_FLOW_CONTROLL = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "EachSecondPass", "name": "condition", "x": "$inputs.image_1", "next_steps": ["$steps.image_consumer"]}, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$inputs.image_1", + "$steps.image_producer_x.image", + "$inputs.image_3", + ], + "y": "some-value", + }, + ], 
+ "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_1_and_flow_controll( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1_AND_FLOW_CONTROLL, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape if i is not None else None for i in results[0]["x"]] == [ + (100, 100, 3), + None, + (50, 50, 3), + None, + (300, 300, 3), + None, + ] + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1_AND_MULTI_FLOW_CONTROLL_1 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "EachSecondPass", "name": "condition", "x": "$inputs.image_1", "next_steps": ["$steps.image_consumer"]}, + {"type": "AlwaysPass", "name": "condition_scalar", "x": "$steps.image_producer_x.image", "next_steps": 
["$steps.image_consumer"]}, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$inputs.image_1", + "$steps.image_producer_x.image", + "$inputs.image_3", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_1_and_multi_flow_controll_1( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1_AND_MULTI_FLOW_CONTROLL_1, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape if i is not None else None for i in results[0]["x"]] == [ + (100, 100, 3), + None, + (50, 50, 3), + None, + (300, 300, 3), + None, + ] + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1_AND_MULTI_FLOW_CONTROLL_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + {"type": "WorkflowParameter", "name": "some", "default_value": 39}, + ], + 
"steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "EachSecondPass", "name": "condition", "x": "$inputs.image_1", "next_steps": ["$steps.image_consumer"]}, + {"type": "AlwaysTerminate", "name": "condition_scalar", "x": "$steps.image_producer_x.image", "next_steps": ["$steps.image_consumer"]}, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$inputs.image_1", + "$steps.image_producer_x.image", + "$inputs.image_3", + "$inputs.some", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_1_and_multi_flow_controll_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1_AND_MULTI_FLOW_CONTROLL_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 1 + assert results[0]["x"] is None + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_0 = { + 
"version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "AlwaysTerminate", "name": "condition_scalar", "x": "$steps.image_producer_x.image", + "next_steps": ["$steps.image_consumer"]}, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$steps.dimensionality_boost.x", + "$steps.image_producer_x.image", + "$steps.dimensionality_boost.y", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_2_with_flow_controll_at_dim_0( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_0, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert 
results[0]["x"] is None + assert results[1]["x"] is None + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_1 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + {"type": "AlwaysTerminate", "name": "condition_scalar", "x": "$inputs.image_1", + "next_steps": ["$steps.dimensionality_boost"]}, + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$steps.dimensionality_boost.x", + "$steps.image_producer_x.image", + "$steps.dimensionality_boost.y", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_2_with_flow_controll_at_dim_1( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_1, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + 
runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert results[0]["x"] is None + assert results[1]["x"] is None + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "EachSecondPass", "name": "condition_scalar", "x": "$steps.dimensionality_boost.x", + "next_steps": ["$steps.image_consumer"]}, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$steps.dimensionality_boost.x", + "$steps.image_producer_x.image", + "$steps.dimensionality_boost.y", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_2_with_flow_controll_at_dim_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + 
workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape if i is not None else None for i in results[0]["x"]] == [ + (100, 100, 3), + None, + (50, 50, 3), + None, + (300, 300, 3), + None, + ] + assert [i.numpy_image.shape if i is not None else None for i in results[1]["x"]] == [ + (120, 120, 3), + None, + (50, 50, 3), + None, + (320, 320, 3), + None, + ] + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_2_AND_DIM_0 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "AlwaysTerminate", "name": "condition_scalar", "x": "$steps.image_producer_x.image", + "next_steps": ["$steps.image_consumer"]}, + {"type": "EachSecondPass", "name": "condition_batch", "x": "$steps.dimensionality_boost.x", + "next_steps": ["$steps.image_consumer"]}, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$steps.dimensionality_boost.x", + "$steps.image_producer_x.image", + "$steps.dimensionality_boost.y", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, 
"get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_2_with_flow_controll_at_dim_2_and_dim_0( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_2_AND_DIM_0, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert results[0]["x"] is None + assert results[1]["x"] is None + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_2_AND_DIM_1 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + {"type": "EachSecondPass", "name": "condition_batch_1", "x": "$inputs.image_1", + "next_steps": ["$steps.dimensionality_boost"]}, + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "EachSecondPass", "name": "condition_batch_2", "x": "$steps.dimensionality_boost.x", + "next_steps": ["$steps.image_consumer"]}, + { + "type": 
"SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$steps.dimensionality_boost.x", + "$steps.image_producer_x.image", + "$steps.dimensionality_boost.y", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_2_with_flow_controll_at_dim_2_and_dim_1( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_2_AND_DIM_1, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape if i is not None else None for i in results[0]["x"]] == [ + (100, 100, 3), + None, + (50, 50, 3), + None, + (300, 300, 3), + None, + ] + assert results[1]["x"] is None + + +WORKFLOW_WITH_NON_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS_WITH_CONDITIONAL_EXECUTION = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": 
"$steps.image_producer_x.image", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image", + }, + {"type": "AlwaysTerminate", "name": "condition_batch_2", "x": "$steps.image_consumer.shapes", + "next_steps": ["$steps.identity_simd_2"]}, + { + "type": "IdentitySIMD", + "name": "identity_simd_2", + "x": "$steps.image_consumer.shapes", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.identity_simd_2.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_multiple_scalar_producers_feeding_non_simd_consumer_raising_dim_with_conditional_execution( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS_WITH_CONDITIONAL_EXECUTION, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result == [{"shapes": None}] From c4b59cb8b862035785d177e1c7167e6f36cfc734 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 22 Aug 2025 17:05:16 +0200 Subject: [PATCH 10/20] Make linters happy --- .../step_input_assembler.py | 34 +++- .../plugin_image_producer/__init__.py | 3 +- ...ng_scalars_to_fit_into_batch_parameters.py | 148 ++++++++++++++---- 3 files 
changed, 146 insertions(+), 39 deletions(-) diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py index d6cb3588dd..acaf27c708 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py @@ -269,7 +269,9 @@ def construct_mask_for_all_inputs_dimensionalities( all_dimensionalities = {dim for dim in inputs_dimensionalities.values() if dim > 0} print("all_dimensionalities", all_dimensionalities) batch_masks, non_batch_masks = [], set() - print(f"Execution branches impacting inputs: {step_node.execution_branches_impacting_inputs}") + print( + f"Execution branches impacting inputs: {step_node.execution_branches_impacting_inputs}" + ) for execution_branch in step_node.execution_branches_impacting_inputs: if not branching_manager.is_execution_branch_registered( execution_branch=execution_branch @@ -281,16 +283,22 @@ def construct_mask_for_all_inputs_dimensionalities( execution_branch=execution_branch ): mask = branching_manager.get_mask(execution_branch=execution_branch) - print(f"EXECUTION BRANCH: {execution_branch} is batch oriented - mask: {mask}") + print( + f"EXECUTION BRANCH: {execution_branch} is batch oriented - mask: {mask}" + ) batch_masks.append(mask) else: mask = branching_manager.get_mask(execution_branch=execution_branch) - print(f"EXECUTION BRANCH: {execution_branch} is not batch oriented - mask: {mask}") + print( + f"EXECUTION BRANCH: {execution_branch} is not batch oriented - mask: {mask}" + ) non_batch_masks.add(mask) scalar_mask_contains_false = False in non_batch_masks if scalar_mask_contains_false: print("CANCELLING OUT!") - return {dimension: set() for dimension in all_dimensionalities}, scalar_mask_contains_false + return { + 
dimension: set() for dimension in all_dimensionalities + }, scalar_mask_contains_false return { dimension: get_masks_intersection_up_to_dimension( batch_masks=batch_masks, @@ -720,7 +728,11 @@ def apply_auto_batch_casting( batch_content.append(None) created_batch = Batch(content=batch_content, indices=indices) if step_execution_dimensionality == auto_batch_casting_config.casted_dimensionality: - return created_batch, indices, contains_empty_scalar_step_output_selector or scalars_discarded + return ( + created_batch, + indices, + contains_empty_scalar_step_output_selector or scalars_discarded, + ) if step_execution_dimensionality > auto_batch_casting_config.casted_dimensionality: raise ExecutionEngineRuntimeError( public_message=f"Detected a situation when parameter: " @@ -757,7 +769,11 @@ def apply_auto_batch_casting( ) if upper_level_lineage_dimensionality == 0 and not step_requests_batch_input: # for batch collapse into scalar - return created_batch, indices, contains_empty_scalar_step_output_selector or scalars_discarded + return ( + created_batch, + indices, + contains_empty_scalar_step_output_selector or scalars_discarded, + ) if auto_batch_casting_config.lineage_support is None: upper_level_indices = [indices[0][:-1]] else: @@ -791,7 +807,11 @@ def apply_auto_batch_casting( data=batch_content, guard_of_indices_wrapping=guard_of_indices_wrapping, ) - return result, result.indices, contains_empty_scalar_step_output_selector or scalars_discarded + return ( + result, + result.indices, + contains_empty_scalar_step_output_selector or scalars_discarded, + ) def _flatten_batch_oriented_inputs( diff --git a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py index f131f75283..7878b8152a 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py +++ 
b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py @@ -15,7 +15,8 @@ from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, STRING_KIND, - Selector, StepSelector, + Selector, + StepSelector, ) from inference.core.workflows.execution_engine.v1.entities import FlowControl from inference.core.workflows.prototypes.block import ( diff --git a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py index 0473dff6c8..f51ddaa5f8 100644 --- a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py +++ b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py @@ -2960,7 +2960,12 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ "version": "1.1", "inputs": [], "steps": [ - {"type": "AlwaysTerminate", "name": "condition", "x": "dummy", "next_steps": ["$steps.image_producer_x"]}, + { + "type": "AlwaysTerminate", + "name": "condition", + "x": "dummy", + "next_steps": ["$steps.image_producer_x"], + }, {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, {"type": "ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, { @@ -3012,7 +3017,12 @@ def test_workflow_always_blocking_simd_producer( "version": "1.1", "inputs": [], "steps": [ - {"type": "AlwaysPass", "name": "condition", "x": "dummy", "next_steps": ["$steps.image_producer_x"]}, + { + "type": "AlwaysPass", + "name": "condition", + "x": "dummy", + "next_steps": ["$steps.image_producer_x"], + }, {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, {"type": "ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, { @@ -3065,7 +3075,12 @@ def test_workflow_always_passing_simd_producer( "inputs": [], 
"steps": [ {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, - {"type": "AlwaysTerminate", "name": "condition", "x": "$steps.image_producer_x.image", "next_steps": ["$steps.image_consumer"]}, + { + "type": "AlwaysTerminate", + "name": "condition", + "x": "$steps.image_producer_x.image", + "next_steps": ["$steps.image_consumer"], + }, {"type": "ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, { "type": "SIMDConsumerAcceptingListDecDim", @@ -3117,7 +3132,12 @@ def test_workflow_always_blocking_simd_producer_after_production( "inputs": [], "steps": [ {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, - {"type": "AlwaysPass", "name": "condition", "x": "$steps.image_producer_x.image", "next_steps": ["$steps.image_consumer"]}, + { + "type": "AlwaysPass", + "name": "condition", + "x": "$steps.image_producer_x.image", + "next_steps": ["$steps.image_consumer"], + }, {"type": "ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, { "type": "SIMDConsumerAcceptingListDecDim", @@ -3164,7 +3184,6 @@ def test_workflow_always_passing_simd_producer_after_production( assert [i.numpy_image.shape for i in results[0]["x"]] == [(50, 50, 3), (60, 60, 3)] - WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1_AND_FLOW_CONTROLL = { "version": "1.1", "inputs": [ @@ -3173,7 +3192,12 @@ def test_workflow_always_passing_simd_producer_after_production( ], "steps": [ {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, - {"type": "EachSecondPass", "name": "condition", "x": "$inputs.image_1", "next_steps": ["$steps.image_consumer"]}, + { + "type": "EachSecondPass", + "name": "condition", + "x": "$inputs.image_1", + "next_steps": ["$steps.image_consumer"], + }, { "type": "SIMDConsumerAcceptingListDecDim", "name": "image_consumer", @@ -3225,7 +3249,9 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ # then 
assert len(results) == 1 - assert [i.numpy_image.shape if i is not None else None for i in results[0]["x"]] == [ + assert [ + i.numpy_image.shape if i is not None else None for i in results[0]["x"] + ] == [ (100, 100, 3), None, (50, 50, 3), @@ -3243,8 +3269,18 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ ], "steps": [ {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, - {"type": "EachSecondPass", "name": "condition", "x": "$inputs.image_1", "next_steps": ["$steps.image_consumer"]}, - {"type": "AlwaysPass", "name": "condition_scalar", "x": "$steps.image_producer_x.image", "next_steps": ["$steps.image_consumer"]}, + { + "type": "EachSecondPass", + "name": "condition", + "x": "$inputs.image_1", + "next_steps": ["$steps.image_consumer"], + }, + { + "type": "AlwaysPass", + "name": "condition_scalar", + "x": "$steps.image_producer_x.image", + "next_steps": ["$steps.image_consumer"], + }, { "type": "SIMDConsumerAcceptingListDecDim", "name": "image_consumer", @@ -3296,7 +3332,9 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ # then assert len(results) == 1 - assert [i.numpy_image.shape if i is not None else None for i in results[0]["x"]] == [ + assert [ + i.numpy_image.shape if i is not None else None for i in results[0]["x"] + ] == [ (100, 100, 3), None, (50, 50, 3), @@ -3315,8 +3353,18 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ ], "steps": [ {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, - {"type": "EachSecondPass", "name": "condition", "x": "$inputs.image_1", "next_steps": ["$steps.image_consumer"]}, - {"type": "AlwaysTerminate", "name": "condition_scalar", "x": "$steps.image_producer_x.image", "next_steps": ["$steps.image_consumer"]}, + { + "type": "EachSecondPass", + "name": "condition", + "x": "$inputs.image_1", + "next_steps": ["$steps.image_consumer"], + }, + { + "type": "AlwaysTerminate", + 
"name": "condition_scalar", + "x": "$steps.image_producer_x.image", + "next_steps": ["$steps.image_consumer"], + }, { "type": "SIMDConsumerAcceptingListDecDim", "name": "image_consumer", @@ -3386,8 +3434,12 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ "y": "$inputs.image_3", }, {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, - {"type": "AlwaysTerminate", "name": "condition_scalar", "x": "$steps.image_producer_x.image", - "next_steps": ["$steps.image_consumer"]}, + { + "type": "AlwaysTerminate", + "name": "condition_scalar", + "x": "$steps.image_producer_x.image", + "next_steps": ["$steps.image_consumer"], + }, { "type": "SIMDConsumerAcceptingListDecDim", "name": "image_consumer", @@ -3450,8 +3502,12 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ {"type": "WorkflowImage", "name": "image_3"}, ], "steps": [ - {"type": "AlwaysTerminate", "name": "condition_scalar", "x": "$inputs.image_1", - "next_steps": ["$steps.dimensionality_boost"]}, + { + "type": "AlwaysTerminate", + "name": "condition_scalar", + "x": "$inputs.image_1", + "next_steps": ["$steps.dimensionality_boost"], + }, { "type": "DoubleBoostDimensionality", "name": "dimensionality_boost", @@ -3528,8 +3584,12 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ "y": "$inputs.image_3", }, {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, - {"type": "EachSecondPass", "name": "condition_scalar", "x": "$steps.dimensionality_boost.x", - "next_steps": ["$steps.image_consumer"]}, + { + "type": "EachSecondPass", + "name": "condition_scalar", + "x": "$steps.dimensionality_boost.x", + "next_steps": ["$steps.image_consumer"], + }, { "type": "SIMDConsumerAcceptingListDecDim", "name": "image_consumer", @@ -3581,7 +3641,9 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ # then assert len(results) == 2 - assert [i.numpy_image.shape 
if i is not None else None for i in results[0]["x"]] == [ + assert [ + i.numpy_image.shape if i is not None else None for i in results[0]["x"] + ] == [ (100, 100, 3), None, (50, 50, 3), @@ -3589,7 +3651,9 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ (300, 300, 3), None, ] - assert [i.numpy_image.shape if i is not None else None for i in results[1]["x"]] == [ + assert [ + i.numpy_image.shape if i is not None else None for i in results[1]["x"] + ] == [ (120, 120, 3), None, (50, 50, 3), @@ -3613,10 +3677,18 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ "y": "$inputs.image_3", }, {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, - {"type": "AlwaysTerminate", "name": "condition_scalar", "x": "$steps.image_producer_x.image", - "next_steps": ["$steps.image_consumer"]}, - {"type": "EachSecondPass", "name": "condition_batch", "x": "$steps.dimensionality_boost.x", - "next_steps": ["$steps.image_consumer"]}, + { + "type": "AlwaysTerminate", + "name": "condition_scalar", + "x": "$steps.image_producer_x.image", + "next_steps": ["$steps.image_consumer"], + }, + { + "type": "EachSecondPass", + "name": "condition_batch", + "x": "$steps.dimensionality_boost.x", + "next_steps": ["$steps.image_consumer"], + }, { "type": "SIMDConsumerAcceptingListDecDim", "name": "image_consumer", @@ -3679,8 +3751,12 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ {"type": "WorkflowImage", "name": "image_3"}, ], "steps": [ - {"type": "EachSecondPass", "name": "condition_batch_1", "x": "$inputs.image_1", - "next_steps": ["$steps.dimensionality_boost"]}, + { + "type": "EachSecondPass", + "name": "condition_batch_1", + "x": "$inputs.image_1", + "next_steps": ["$steps.dimensionality_boost"], + }, { "type": "DoubleBoostDimensionality", "name": "dimensionality_boost", @@ -3688,8 +3764,12 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ 
"y": "$inputs.image_3", }, {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, - {"type": "EachSecondPass", "name": "condition_batch_2", "x": "$steps.dimensionality_boost.x", - "next_steps": ["$steps.image_consumer"]}, + { + "type": "EachSecondPass", + "name": "condition_batch_2", + "x": "$steps.dimensionality_boost.x", + "next_steps": ["$steps.image_consumer"], + }, { "type": "SIMDConsumerAcceptingListDecDim", "name": "image_consumer", @@ -3741,7 +3821,9 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ # then assert len(results) == 2 - assert [i.numpy_image.shape if i is not None else None for i in results[0]["x"]] == [ + assert [ + i.numpy_image.shape if i is not None else None for i in results[0]["x"] + ] == [ (100, 100, 3), None, (50, 50, 3), @@ -3776,8 +3858,12 @@ def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_ "images_x": "$steps.identity_simd.x", "images_y": "$steps.image_producer_y.image", }, - {"type": "AlwaysTerminate", "name": "condition_batch_2", "x": "$steps.image_consumer.shapes", - "next_steps": ["$steps.identity_simd_2"]}, + { + "type": "AlwaysTerminate", + "name": "condition_batch_2", + "x": "$steps.image_consumer.shapes", + "next_steps": ["$steps.identity_simd_2"], + }, { "type": "IdentitySIMD", "name": "identity_simd_2", From ac237f26a31156b8b3b1663315ab809f3405893d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 22 Aug 2025 17:12:27 +0200 Subject: [PATCH 11/20] Clean up --- .../v1/compiler/graph_constructor.py | 12 ---------- .../execution_engine/v1/executor/core.py | 14 ------------ .../dynamic_batches_manager.py | 1 - .../execution_data_manager/execution_cache.py | 6 ----- .../execution_data_manager/manager.py | 19 ---------------- .../step_input_assembler.py | 22 ------------------- .../v1/executor/output_constructor.py | 7 ------ .../detections_to_parent_coordinates_batch.py | 7 ------ 
.../plugin_image_producer/__init__.py | 3 --- .../rock_paper_scissor_plugin/expression.py | 4 +--- .../test_workflow_with_detection_plus_ocr.py | 1 - ...est_workflow_with_dimensionality_change.py | 2 -- .../test_workflow_with_dominant_color.py | 2 +- .../test_workflow_with_llama_vision.py | 3 --- 14 files changed, 2 insertions(+), 101 deletions(-) diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index 802e333d56..c95d5ca02a 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -735,12 +735,10 @@ def denote_data_flow_for_step( ) ) input_dimensionality_offsets = manifest.get_input_dimensionality_offsets() - print("input_dimensionality_offsets", input_dimensionality_offsets) verify_step_input_dimensionality_offsets( step_name=step_name, input_dimensionality_offsets=input_dimensionality_offsets, ) - print("scalar_parameters_to_be_batched", scalar_parameters_to_be_batched) inputs_dimensionalities = get_inputs_dimensionalities( step_name=step_name, step_type=manifest.type, @@ -748,18 +746,14 @@ def denote_data_flow_for_step( scalar_parameters_to_be_batched=scalar_parameters_to_be_batched, input_dimensionality_offsets=input_dimensionality_offsets, ) - print("inputs_dimensionalities", inputs_dimensionalities) logger.debug( f"For step: {node}, detected the following input dimensionalities: {inputs_dimensionalities}" ) parameters_with_batch_inputs = grab_parameters_defining_batch_inputs( inputs_dimensionalities=inputs_dimensionalities, ) - print("parameters_with_batch_inputs", parameters_with_batch_inputs) dimensionality_reference_property = manifest.get_dimensionality_reference_property() - print("dimensionality_reference_property", dimensionality_reference_property) output_dimensionality_offset = manifest.get_output_dimensionality_offset() - 
print("output_dimensionality_offset", output_dimensionality_offset) verify_step_input_dimensionality_offsets( step_name=step_name, input_dimensionality_offsets=input_dimensionality_offsets, @@ -818,8 +812,6 @@ def denote_data_flow_for_step( scalar_parameters_to_be_batched=scalar_parameters_to_be_batched, ) step_node_data.auto_batch_casting_lineage_supports = lineage_supports - print("lineage_supports", lineage_supports) - print("Data lineage of block output", data_lineage) if data_lineage: on_top_level_lineage_denoted(data_lineage[0]) step_node_data.data_lineage = data_lineage @@ -1614,9 +1606,6 @@ def verify_declared_batch_compatibility_against_actual_inputs( ) if batch_compatibility == {True} and False in actual_input_is_batch: scalar_parameters_to_be_batched.add(property_name) - print( - f"property_name: {property_name}, batch_compatibility={batch_compatibility}, actual_input_is_batch={actual_input_is_batch}, step_accepts_batch_input={step_accepts_batch_input}" - ) return scalar_parameters_to_be_batched @@ -1665,7 +1654,6 @@ def get_lineage_support_for_auto_batch_casted_parameters( casted_dimensionality=parameter_dimensionality, lineage_support=lineage_support, ) - print("DUMMY", result) return result diff --git a/inference/core/workflows/execution_engine/v1/executor/core.py b/inference/core/workflows/execution_engine/v1/executor/core.py index fe4147d561..f41859f2a1 100644 --- a/inference/core/workflows/execution_engine/v1/executor/core.py +++ b/inference/core/workflows/execution_engine/v1/executor/core.py @@ -156,9 +156,6 @@ def run_step( execution_data_manager: ExecutionDataManager, profiler: WorkflowsProfiler, ) -> None: - print( - f"{step_selector} - IS SIMD: {execution_data_manager.is_step_simd(step_selector=step_selector)}" - ) if execution_data_manager.is_step_simd(step_selector=step_selector): return run_simd_step( step_selector=step_selector, @@ -183,21 +180,12 @@ def run_simd_step( step_name = get_last_chunk_of_selector(selector=step_selector) 
step_instance = workflow.steps[step_name].step step_manifest = workflow.steps[step_name].manifest - print( - f"{step_selector} - accepts_batch_input: {step_manifest.accepts_batch_input()}" - ) collapse_of_batch_to_scalar_expected = ( step_manifest.get_output_dimensionality_offset() < 0 and not execution_data_manager.does_step_produce_batches( step_selector=step_selector ) ) - print( - "collapse_of_batch_to_scalar_expected", - collapse_of_batch_to_scalar_expected, - step_manifest.get_output_dimensionality_offset(), - execution_data_manager.does_step_produce_batches(step_selector=step_selector), - ) if step_manifest.accepts_batch_input() or collapse_of_batch_to_scalar_expected: return run_simd_step_in_batch_mode( step_selector=step_selector, @@ -227,7 +215,6 @@ def run_simd_step_in_batch_mode( step_input = execution_data_manager.get_simd_step_input( step_selector=step_selector, ) - print(f"step_input: {step_input}") with profiler.profile_execution_phase( name="step_code_execution", categories=["workflow_block_operation"], @@ -241,7 +228,6 @@ def run_simd_step_in_batch_mode( outputs = [] else: outputs = step_instance.run(**step_input.parameters) - print(f"outputs: {outputs}") with profiler.profile_execution_phase( name="step_output_registration", categories=["execution_engine_operation"], diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/dynamic_batches_manager.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/dynamic_batches_manager.py index 0771aa2689..a35938ea42 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/dynamic_batches_manager.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/dynamic_batches_manager.py @@ -28,7 +28,6 @@ def init( execution_graph=execution_graph, runtime_parameters=runtime_parameters, ) - print("lineage2indices", lineage2indices) return cls(lineage2indices=lineage2indices) def __init__( diff 
--git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py index 14434d1683..7bf5148bd8 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py @@ -87,7 +87,6 @@ def register_batch_of_step_outputs( indices: List[DynamicBatchIndex], outputs: List[Dict[str, Any]], ) -> None: - print(f"REGISTERING {step_name} - {indices} - {outputs}") if not self.step_outputs_batches(step_name=step_name): raise ExecutionEngineRuntimeError( public_message=f"Error in execution engine. Attempted to register batch outputs for " @@ -101,11 +100,6 @@ def register_batch_of_step_outputs( self._cache_content[step_name].register_outputs( indices=indices, outputs=outputs ) - print( - "VERIF", - id(self._cache_content[step_name]), - self._cache_content[step_name]._cache_content, - ) self._step_outputs_registered.add(step_name) except (TypeError, AttributeError) as e: # checking this case defensively as there is no guarantee on block diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py index 90b2b331b6..f7c2e99513 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py @@ -141,9 +141,6 @@ def register_non_simd_step_output( if step_node.output_dimensionality == 1: # we only allow +1 dim increase for now, so it is fine to only handle this case indices = [(i,) for i in range(len(output))] - print( - f"DIMENSIONALITY WAS JUST BORN FOR LINEAGE: {step_node.data_lineage} with indices: {indices} :)" - ) 
self._dynamic_batches_manager.register_element_indices_for_lineage( lineage=step_node.data_lineage, indices=indices, @@ -180,7 +177,6 @@ def register_non_simd_step_output( context="workflow_execution | step_output_registration", ) if isinstance(output, FlowControl): - print("FLOW", step_node.name, output) self._register_flow_control_output_for_non_simd_step( step_node=step_node, output=output, @@ -262,20 +258,11 @@ def register_simd_step_output( node=step_selector, expected_type=StepNode, ) - print( - f"Output data lineage: ", - step_node.data_lineage, - step_node.output_dimensionality, - step_node.step_execution_dimensionality, - ) step_name = get_last_chunk_of_selector(selector=step_selector) if step_node.output_dimensionality == 0: - print("COLLAPSE") - print("outputs", outputs) # SIMD step collapsing into scalar (can happen for auto-batch casting of parameters) if isinstance(outputs, list): if len(outputs) == 0: - print("TERMINATING", step_selector) # termination of the computation as in NON-SIMD case return None if len(outputs) != 1: @@ -371,9 +358,6 @@ def get_selector_indices(self, selector: str) -> Optional[List[DynamicBatchIndex f"the problem - including workflow definition you use.", context="workflow_execution | getting_workflow_data_indices", ) - print( - f"get_selector_indices(selector={selector}): - selector_lineage: {selector_lineage}" - ) if not selector_lineage: return None return self.get_lineage_indices(lineage=selector_lineage) @@ -471,9 +455,6 @@ def get_batch_data( step_name=step_name, batch_elements_indices=indices, ) - print( - f"Getting batch results with selector: {selector} from indices: {indices}" - ) return self._execution_cache.get_batch_output( selector=selector, batch_elements_indices=indices, diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py index 
acaf27c708..89fc07427c 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py @@ -264,38 +264,25 @@ def construct_mask_for_all_inputs_dimensionalities( step_node: StepNode, branching_manager: BranchingManager, ) -> Tuple[Any, bool]: - print(f"Collecting masks for: {step_node.name}") inputs_dimensionalities = collect_inputs_dimensionalities(step_node=step_node) all_dimensionalities = {dim for dim in inputs_dimensionalities.values() if dim > 0} - print("all_dimensionalities", all_dimensionalities) batch_masks, non_batch_masks = [], set() - print( - f"Execution branches impacting inputs: {step_node.execution_branches_impacting_inputs}" - ) for execution_branch in step_node.execution_branches_impacting_inputs: if not branching_manager.is_execution_branch_registered( execution_branch=execution_branch ): - print(f"EXECUTION BRANCH: {execution_branch} not registered") non_batch_masks.add(False) continue if branching_manager.is_execution_branch_batch_oriented( execution_branch=execution_branch ): mask = branching_manager.get_mask(execution_branch=execution_branch) - print( - f"EXECUTION BRANCH: {execution_branch} is batch oriented - mask: {mask}" - ) batch_masks.append(mask) else: mask = branching_manager.get_mask(execution_branch=execution_branch) - print( - f"EXECUTION BRANCH: {execution_branch} is not batch oriented - mask: {mask}" - ) non_batch_masks.add(mask) scalar_mask_contains_false = False in non_batch_masks if scalar_mask_contains_false: - print("CANCELLING OUT!") return { dimension: set() for dimension in all_dimensionalities }, scalar_mask_contains_false @@ -377,8 +364,6 @@ def prepare_parameters( runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, ) -> BatchModeSIMDStepInput: - print(f"PREPARING PARAMS FOR: {step_node.name} - masks: {masks}") - print("DDD", 
step_node.auto_batch_casting_lineage_supports) step_requests_batch_input = step_node.step_manifest.accepts_batch_input() result = {} indices_for_parameter = {} @@ -425,7 +410,6 @@ def prepare_parameters( contains_empty_scalar_step_output_selector or value_contains_empty_scalar_step_output_selector ) - print("indices_for_parameter", indices_for_parameter, result) batch_parameters_indices = [ i for i in indices_for_parameter.values() if i is not None ] @@ -695,9 +679,6 @@ def apply_auto_batch_casting( masks: Dict[int, Optional[Set[DynamicBatchIndex]]], scalars_discarded: bool, ) -> Tuple[Any, List[DynamicBatchIndex], bool]: - print( - f"parameter_name: {parameter_name} - auto_batch_casting_config: {auto_batch_casting_config}" - ) if auto_batch_casting_config.lineage_support is None: indices = [(0,) * auto_batch_casting_config.casted_dimensionality] else: @@ -763,7 +744,6 @@ def apply_auto_batch_casting( f"the problem - including workflow definition you use.", context="workflow_execution | step_input_assembling", ) - print(f"SSSS, step_execution_dimensionality: {step_execution_dimensionality}") upper_level_lineage_dimensionality = ( auto_batch_casting_config.casted_dimensionality - 1 ) @@ -799,8 +779,6 @@ def apply_auto_batch_casting( upper_level_indices = dynamic_batches_manager.get_indices_for_data_lineage( lineage=upper_level_lineage, ) - print("REDUCTION!") - print("upper_level_indices", upper_level_indices) result = reduce_batch_dimensionality( indices=indices, upper_level_index=upper_level_indices, diff --git a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py index 7ab08c9c2e..7e5b9bbb92 100644 --- a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py +++ b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py @@ -46,11 +46,9 @@ def construct_workflow_output( output_name2indices[output.name] = 
execution_data_manager.get_selector_indices( selector=output.selector ) - print("output_name2indices", output_name2indices) batch_oriented_outputs = { output for output, indices in output_name2indices.items() if indices is not None } - print("batch_oriented_outputs", batch_oriented_outputs) kinds_of_output_nodes = { output.name: node_as( execution_graph=execution_graph, @@ -63,7 +61,6 @@ def construct_workflow_output( for output in workflow_outputs: if output.name in batch_oriented_outputs: continue - print(f"taking {output.name} from {output.selector} as non batch") data_piece = execution_data_manager.get_non_batch_data(selector=output.selector) if serialize_results: output_kind = kinds_of_output_nodes[output.name] @@ -111,8 +108,6 @@ def construct_workflow_output( selector=name2selector[name], indices=indices, ) - print(f"Retrieved data for {name} - {data}") - print(f"output array: {array}") for index, data_piece in zip(indices, data): if ( name in outputs_requested_in_parent_coordinates @@ -142,9 +137,7 @@ def construct_workflow_output( f"the problem - including workflow definition you use.", context="workflow_execution | output_construction", ) - print("outputs_arrays", outputs_arrays) results = [] - print("major_batch_size", major_batch_size) for i in range(major_batch_size): single_result = {} for name, value in non_batch_outputs.items(): diff --git a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/detections_to_parent_coordinates_batch.py b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/detections_to_parent_coordinates_batch.py index 974e11d7ab..414dfcfb65 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/detections_to_parent_coordinates_batch.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/detections_to_parent_coordinates_batch.py @@ -103,14 +103,10 @@ def run( ) 
-> BlockResult: result = [] for i, (image, image_predictions) in enumerate(zip(images, images_predictions)): - print("Processing image", i) parent_id = image.parent_metadata.parent_id parent_coordinates = image.parent_metadata.origin_coordinates transformed_predictions = [] for j, prediction in enumerate(image_predictions): - print( - f"Processing prediction {j} - start {len(prediction)} - {prediction['parent_id']}" - ) prediction_copy = deepcopy(prediction) prediction_copy["parent_id"] = np.array([parent_id] * len(prediction)) if parent_coordinates: @@ -125,9 +121,6 @@ def run( prediction_copy["parent_dimensions"] = np.array( [dimensions] * len(prediction) ) - print( - f"Processing prediction {j} - end {len(prediction_copy)} - {prediction_copy['parent_id']}" - ) transformed_predictions.append({"predictions": prediction_copy}) result.append(transformed_predictions) return result diff --git a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py index 7878b8152a..6613569a91 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py @@ -284,7 +284,6 @@ def run( additional: Any, ) -> BlockResult: assert not isinstance(additional, Batch) - print("images_x", images_x, "images_y", images_y) results = [] for image_x, image_y in zip(images_x, images_y): results.append( @@ -330,10 +329,8 @@ def run( additional: Any, ) -> BlockResult: assert not isinstance(additional, Batch) - print("images_x", images_x, "images_y", images_y) results = [] for image_x_batch, image_y_batch in zip(images_x, images_y): - print("image_x_batch", image_x_batch, "image_x_batch", image_y_batch) result = [] for image_x, image_y in zip(image_x_batch, image_y_batch): result.append( diff --git 
a/tests/workflows/integration_tests/execution/stub_plugins/rock_paper_scissor_plugin/expression.py b/tests/workflows/integration_tests/execution/stub_plugins/rock_paper_scissor_plugin/expression.py index eed0130a29..bebe4e1dac 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/rock_paper_scissor_plugin/expression.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/rock_paper_scissor_plugin/expression.py @@ -65,6 +65,4 @@ def run( params = ", ".join(f"{k}={k}" for k in data) code = output.code + f"\n\nresult = function({params})" exec(code, data, results) - result = {"output": results["result"]} - print("result", result) - return result + return {"output": results["result"]} diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_detection_plus_ocr.py b/tests/workflows/integration_tests/execution/test_workflow_with_detection_plus_ocr.py index 7bda47948f..a2a0fb5196 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_detection_plus_ocr.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_detection_plus_ocr.py @@ -130,7 +130,6 @@ def test_detection_plus_ocr_workflow_when_minimal_valid_input_provided( # then assert isinstance(result, list), "Expected list to be delivered" assert len(result) == 1, "Expected 1 element in the output for one input image" - print(result[0]) assert set(result[0].keys()) == { "plates_ocr", "plates_crops", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_dimensionality_change.py b/tests/workflows/integration_tests/execution/test_workflow_with_dimensionality_change.py index 7548a0b92e..97f9a7d321 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_dimensionality_change.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_dimensionality_change.py @@ -132,8 +132,6 @@ def test_workflow_with_detections_coordinates_transformation_in_batch_variant( 
result[0]["predictions_in_own_coordinates"], result[0]["predictions_in_original_coordinates"], ): - print(own_coords_detection["parent_id"]) - print(original_coords_detection["parent_id"]) assert len(own_coords_detection) == len( original_coords_detection ), "Expected number of bounding boxes in nested sv.Detections not to change" diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_dominant_color.py b/tests/workflows/integration_tests/execution/test_workflow_with_dominant_color.py index 0ebc83fa93..7e066b9165 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_dominant_color.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_dominant_color.py @@ -60,7 +60,7 @@ def test_dominant_color_workflow_when_minimal_valid_input_provided( "image": red_image, } ) - print(result) + # then assert isinstance(result, list), "Expected list to be delivered" assert len(result) == 1, "Expected 1 element in the output for one input image" diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_llama_vision.py b/tests/workflows/integration_tests/execution/test_workflow_with_llama_vision.py index f35cbf21bf..cd53c9a7f1 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_llama_vision.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_llama_vision.py @@ -393,7 +393,6 @@ def test_workflow_with_multi_class_classifier_prompt_and_legacy_parser( "top_class", "parsed_prediction", }, "Expected all outputs to be delivered" - print(result[0]["llama_result"]) assert ( isinstance(result[0]["llama_result"], str) and len(result[0]["llama_result"]) > 0 @@ -500,7 +499,6 @@ def test_workflow_with_multi_class_classifier_prompt( "top_class", "parsed_prediction", }, "Expected all outputs to be delivered" - print(result[0]["llama_result"]) assert ( isinstance(result[0]["llama_result"], str) and len(result[0]["llama_result"]) > 0 @@ -695,7 +693,6 @@ def 
test_workflow_with_structured_prompt( "result", "llama_output", }, "Expected all outputs to be delivered" - print(result[0]["llama_output"]) assert isinstance(result[0]["result"], str) From 4cf7bb1d344d049943c81d8ed75157619e35207c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 22 Aug 2025 19:57:29 +0200 Subject: [PATCH 12/20] Fix issue with the dimensionality increase in terms of auto-batch-casting batch-oriented steps --- .../execution_data_manager/manager.py | 51 +++ .../v1/executor/output_constructor.py | 7 + .../plugin_image_producer/__init__.py | 4 + ...ng_scalars_to_fit_into_batch_parameters.py | 414 ++++++++++++++++++ 4 files changed, 476 insertions(+) diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py index f7c2e99513..a4d4ad7539 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py @@ -292,6 +292,57 @@ def register_simd_step_output( return None if ( step_node.output_dimensionality - step_node.step_execution_dimensionality + == 0 + and step_node.step_manifest.get_output_dimensionality_offset() > 0 + ): + # artificial increase in output dimensionality due to ABC which should be unwrapped + if isinstance(outputs, list) and len(outputs) == 0: + self._dynamic_batches_manager.register_element_indices_for_lineage( + lineage=step_node.data_lineage, + indices=indices, + ) + if step_node.child_execution_branches: + if not all(isinstance(element, FlowControl) for element in outputs): + raise ExecutionEngineRuntimeError( + public_message=f"Error in execution engine. Flow control step {step_name} " + f"expected to only produce FlowControl objects. This is most likely bug. 
" + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_output_registration", + ) + self._register_flow_control_output_for_simd_step( + step_node=step_node, + indices=indices, + outputs=outputs, + ) + return None + self._execution_cache.register_batch_of_step_outputs( + step_name=step_name, + indices=indices, + outputs=outputs, + ) + return None + if not isinstance(outputs, list) or len(outputs) != 1: + raise AssumptionError( + public_message=f"Error in execution engine. In context of SIMD step: {step_selector} attempts to " + f"register output which should be nested, 1-element batch, but detected batched " + f"output with more than a single element (or incompatible output), " + f"making the operation not possible. This is most likely bug (either a block or " + f"Execution Engine is faulty). Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_output_registration", + ) + index_base = indices[0][:-1] + outputs = outputs[0] + indices = [index_base + (i,) for i in range(len(outputs))] + self._dynamic_batches_manager.register_element_indices_for_lineage( + lineage=step_node.data_lineage, + indices=indices, + ) + elif ( + step_node.output_dimensionality - step_node.step_execution_dimensionality ) > 0: # increase in dimensionality indices, outputs = flatten_nested_output( diff --git a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py index 7e5b9bbb92..95724ab79c 100644 --- a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py +++ b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py @@ -99,6 
+99,13 @@ def construct_workflow_output( lineage=[top_level_data_lineage_marker] ) ) + if ( + major_batch_size == 0 + and top_level_data_lineage_marker != WORKFLOW_INPUT_BATCH_LINEAGE_ID + ): + # we had some dynamic dimensionality increase on top of auto-batch casting, but we + # failed to register indices due to conditional execution + major_batch_size = 1 else: major_batch_size = 0 for name in batch_oriented_outputs: diff --git a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py index 6613569a91..94080364aa 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py @@ -221,6 +221,10 @@ def describe_outputs(cls) -> List[OutputDefinition]: def get_execution_engine_compatibility(cls) -> Optional[str]: return ">=1.3.0,<2.0.0" + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images_x", "images_y"] + @classmethod def get_output_dimensionality_offset(cls) -> int: return 1 diff --git a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py index f51ddaa5f8..10d07d8227 100644 --- a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py +++ b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py @@ -3907,3 +3907,417 @@ def test_workflow_with_multiple_scalar_producers_feeding_non_simd_consumer_raisi # then assert result == [{"shapes": None}] + + +WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": 
"image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiSIMDImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_multiple_scalar_producers_feeding_simd_consumer_raising_dim( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result == [{"shapes": "[192, 168, 3][220, 230, 3]"}] + + +WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_TWICE_FED_BY_SCALAR_PRODUCERS = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiSIMDImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": 
"$steps.image_producer_y.image", + }, + { + "type": "BoostDimensionality", + "name": "dim_boost", + "x": "$steps.image_consumer.shapes", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.dim_boost.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_multiple_scalar_producers_feeding_simd_consumer_raising_dim_twice( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_TWICE_FED_BY_SCALAR_PRODUCERS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result == [ + {"shapes": ["[192, 168, 3][220, 230, 3]", "[192, 168, 3][220, 230, 3]"]} + ] + + +WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS_AND_FLOW_CONTROL_AT_DIM_0 = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "AlwaysTerminate", + "name": "condition_batch_2", + "x": "$steps.image_producer_x.image", + "next_steps": ["$steps.identity_simd"], + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiSIMDImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image", + }, + ], + 
"outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_multiple_scalar_producers_feeding_simd_consumer_raising_dim_and_flow_control_at_dim_0( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS_AND_FLOW_CONTROL_AT_DIM_0, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result == [{"shapes": None}] + + +WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS_AND_FLOW_CONTROL_AT_DIM_1 = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiSIMDImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image", + }, + { + "type": "AlwaysTerminate", + "name": "condition_batch_2", + "x": "$steps.image_consumer.shapes", + "next_steps": ["$steps.identity_simd_2"], + }, + { + "type": "IdentitySIMD", + "name": "identity_simd_2", + "x": "$steps.image_consumer.shapes", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + 
"selector": "$steps.identity_simd_2.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_multiple_scalar_producers_feeding_simd_consumer_raising_dim_and_flow_control_at_dim_1( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS_AND_FLOW_CONTROL_AT_DIM_1, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result == [{"shapes": None}] + + +WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "MultiSIMDImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$inputs.image_1", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_scalar_producer_and_batch_input_feeding_simd_consumer_raising_dim( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + 
"tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((292, 168, 3)), np.zeros((392, 168, 3))] + } + ) + + # then + assert result == [ + {"shapes": ["[192, 168, 3][292, 168, 3]"]}, + {"shapes": ["[192, 168, 3][392, 168, 3]"]}, + ] + + +WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_BATCH_INPUTS = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_2"}, + ], + "steps": [ + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$inputs.image_1", + }, + { + "type": "MultiSIMDImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$inputs.image_2", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_batch_inputs_feeding_simd_consumer_raising_dim( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + 
workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_BATCH_INPUTS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((292, 168, 3)), np.zeros((392, 168, 3))], + "image_2": [np.zeros((293, 168, 3)), np.zeros((393, 168, 3))], + } + ) + + # then + assert result == [ + {"shapes": ["[292, 168, 3][293, 168, 3]"]}, + {"shapes": ["[392, 168, 3][393, 168, 3]"]}, + ] From 546fad8ae18fa00f51f1fc5a754467d23f9067f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Mon, 25 Aug 2025 09:46:43 +0200 Subject: [PATCH 13/20] Add first part of changelog --- .../execution_engine/v1/compiler/graph_constructor.py | 7 ------- inference/core/workflows/execution_engine/v1/core.py | 2 +- tests/inference/hosted_platform_tests/test_workflows.py | 2 +- .../inference/integration_tests/test_workflow_endpoints.py | 2 +- 4 files changed, 3 insertions(+), 10 deletions(-) diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index c95d5ca02a..3c9400ad7c 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -1785,13 +1785,6 @@ def establish_batch_oriented_step_lineage( ) if output_dimensionality_offset < 0: result_dimensionality = reference_lineage[:output_dimensionality_offset] - # if len(result_dimensionality) == 0: - # raise StepOutputLineageError( - # public_message=f"Step {step_selector} is to decrease dimensionality, but it is not possible if " - # f"input dimensionality is not greater or equal 2, otherwise output would not " - # f"be batch-oriented.", - # context="workflow_compilation | execution_graph_construction | establishing_step_output_lineage", - # ) return result_dimensionality 
if output_dimensionality_offset == 0: return reference_lineage diff --git a/inference/core/workflows/execution_engine/v1/core.py b/inference/core/workflows/execution_engine/v1/core.py index 3f0134b506..9df7fb33d4 100644 --- a/inference/core/workflows/execution_engine/v1/core.py +++ b/inference/core/workflows/execution_engine/v1/core.py @@ -23,7 +23,7 @@ validate_runtime_input, ) -EXECUTION_ENGINE_V1_VERSION = Version("1.5.0") +EXECUTION_ENGINE_V1_VERSION = Version("1.6.0") class ExecutionEngineV1(BaseExecutionEngine): diff --git a/tests/inference/hosted_platform_tests/test_workflows.py b/tests/inference/hosted_platform_tests/test_workflows.py index 33654bd321..8d34b6c3b2 100644 --- a/tests/inference/hosted_platform_tests/test_workflows.py +++ b/tests/inference/hosted_platform_tests/test_workflows.py @@ -129,7 +129,7 @@ def test_get_versions_of_execution_engine(object_detection_service_url: str) -> # then response.raise_for_status() response_data = response.json() - assert response_data["versions"] == ["1.5.0"] + assert response_data["versions"] == ["1.6.0"] FUNCTION = """ diff --git a/tests/inference/integration_tests/test_workflow_endpoints.py b/tests/inference/integration_tests/test_workflow_endpoints.py index e8d3c38135..da98e9ecc2 100644 --- a/tests/inference/integration_tests/test_workflow_endpoints.py +++ b/tests/inference/integration_tests/test_workflow_endpoints.py @@ -691,7 +691,7 @@ def test_get_versions_of_execution_engine(server_url: str) -> None: # then response.raise_for_status() response_data = response.json() - assert response_data["versions"] == ["1.5.0"] + assert response_data["versions"] == ["1.6.0"] def test_getting_block_schema_using_get_endpoint(server_url) -> None: From 0b383780fd70ccf0bba2b8c5b5c6875f759dd559 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Mon, 25 Aug 2025 09:50:43 +0200 Subject: [PATCH 14/20] Revert the order of EE changelog --- docs/workflows/execution_engine_changelog.md | 137 ++++++++++--------- 
1 file changed, 69 insertions(+), 68 deletions(-) diff --git a/docs/workflows/execution_engine_changelog.md b/docs/workflows/execution_engine_changelog.md index 7fc53c7951..c5cc451940 100644 --- a/docs/workflows/execution_engine_changelog.md +++ b/docs/workflows/execution_engine_changelog.md @@ -2,42 +2,54 @@ Below you can find the changelog for Execution Engine. -## Execution Engine `v1.2.0` | inference `v0.23.0` - -* The [`video_metadata` kind](/workflows/kinds/video_metadata.md) has been deprecated, and we **strongly recommend discontinuing its use for building -blocks moving forward**. As an alternative, the [`image` kind](/workflows/kinds/image.md) has been extended to support the same metadata as -[`video_metadata` kind](/workflows/kinds/video_metadata.md), which can now be provided optionally. This update is -**non-breaking** for existing blocks, but **some older blocks** that produce images **may become incompatible** with -**future** video processing blocks. - -??? warning "Potential blocks incompatibility" +## Execution Engine `v1.5.0` | inference `v0.38.0` - As previously mentioned, adding `video_metadata` as an optional field to the internal representation of - [`image` kind](/workflows/kinds/image.md) (`WorkflowImageData` class) - may introduce some friction between existing blocks that output the [`image` kind](/workflows/kinds/image.md) and - future video processing blocks that rely on `video_metadata` being part of `image` representation. - - The issue arises because, while we can provide **default** values for `video_metadata` in `image` without - explicitly copying them from the input, any non-default metadata that was added upstream may be lost. - This can lead to downstream blocks that depend on the `video_metadata` not functioning as expected. +!!! Note "Change does not require any action" + + This change does not require any change from Workflows users. This is just performance optimisation. 
- We've updated all existing `roboflow_core` blocks to account for this, but blocks created before this change in - external repositories may cause issues in workflows where their output images are used by video processing blocks. +* Exposed new parameter in the init method of `BaseExecutionEngine` class - `executor` which can accept instance of +Python `ThreadPoolExecutor` to be used by execution engine. Thanks to this change, processing should be faster, as +each `BaseExecutionEngine.run(...)` will not require dedicated instance of `ThreadPoolExecutor` as it was so far. +Additionally, we are significantly limiting threads spawning which may also be a benefit in some installations. +* Despite the change, Execution Engine maintains the limit of concurrently executed steps - by limiting the number of +steps that run through the executor at a time (since Execution Engine is no longer in control of `ThreadPoolExecutor` +creation, and it is possible for the pool to have more workers available). -* While the deprecated [`video_metadata` kind](/workflows/kinds/video_metadata.md) is still available for use, it will be fully removed in -Execution Engine version `v2.0.0`. +??? Hint "How to inject `ThreadPoolExecutor` to Execution Engine?" + + ```python + from concurrent.futures import ThreadPoolExecutor + workflow_init_parameters = { ... } + with ThreadPoolExecutor(max_workers=...) as thread_pool_executor: + execution_engine = ExecutionEngine.init( + init_parameters=workflow_init_parameters, + max_concurrent_steps=4, + workflow_id="your-workflow-id", + executor=thread_pool_executor, + ) + runtime_parameters = { + "image": cv2.imread("your-image-path") + } + results = execution_engine.run(runtime_parameters=runtime_parameters) + ``` -!!! 
warning "Breaking change planned - Execution Engine `v2.0.0`" +## Execution Engine `v1.4.0` | inference `v0.29.0` - [`video_metadata` kind](/workflows/kinds/video_metadata.md) got deprecated and will be removed in `v2.0.0` +* Added new kind - [`secret`](/workflows/kinds/secret.md) to represent credentials. **No action needed** for existing +blocks, yet it is expected that over time blocks developers should use this kind, whenever block is to accept secret +value as parameter. +* Fixed issue with results serialization introduced in `v1.3.0` - by mistake, Execution Engine was not serializing +non-batch oriented outputs. -* As a result of the changes mentioned above, the internal representation of the [`image` kind](/workflows/kinds/image.md) has been updated to -include a new `video_metadata` property. This property can be optionally set in the constructor; if not provided, -a default value with reasonable defaults will be used. To simplify metadata manipulation within blocks, we have -introduced two new class methods: `WorkflowImageData.copy_and_replace(...)` and `WorkflowImageData.create_crop(...)`. -For more details, refer to the updated [`WoorkflowImageData` usage guide](/workflows/internal_data_types.md#workflowimagedata). +* Fixed Execution Engine bug with preparing inputs for steps. For non-SIMD steps before, while collecting inputs +in runtime, `WorkflowBlockManifest.accepts_empty_input()` method result was being ignored - causing the bug when +one non-SIMD step was feeding empty values to downstream blocks. Additionally, in the light of changes made in `v1.3.0`, +thanks to which non-SIMD blocks can easily feed inputs for downstream SIMD steps - it is needed to check if +upstream non-SIMD block yielded non-empty results (as SIMD block may not accept empty results). This check was added. +**No action needed** for existing blocks, but this fix may fix previously broken Workflows. 
## Execution Engine `v1.3.0` | inference `v0.27.0` @@ -303,52 +315,41 @@ subsets of steps**, enabling building such tools as debuggers. serializer/deserializer defined as the last one will be in use. -## Execution Engine `v1.4.0` | inference `v0.29.0` +## Execution Engine `v1.2.0` | inference `v0.23.0` -* Added new kind - [`secret`](/workflows/kinds/secret.md) to represent credentials. **No action needed** for existing -blocks, yet it is expected that over time blocks developers should use this kind, whenever block is to accept secret -value as parameter. +* The [`video_metadata` kind](/workflows/kinds/video_metadata.md) has been deprecated, and we **strongly recommend discontinuing its use for building +blocks moving forward**. As an alternative, the [`image` kind](/workflows/kinds/image.md) has been extended to support the same metadata as +[`video_metadata` kind](/workflows/kinds/video_metadata.md), which can now be provided optionally. This update is +**non-breaking** for existing blocks, but **some older blocks** that produce images **may become incompatible** with +**future** video processing blocks. -* Fixed issue with results serialization introduced in `v1.3.0` - by mistake, Execution Engine was not serializing -non-batch oriented outputs. +??? warning "Potential blocks incompatibility" -* Fixed Execution Engine bug with preparing inputs for steps. For non-SIMD steps before, while collecting inputs -in runtime, `WorkflowBlockManifest.accepts_empty_input()` method result was being ignored - causing the bug when -one non-SIMD step was feeding empty values to downstream blocks. Additionally, in the light of changes made in `v1.3.0`, -thanks to which non-SIMD blocks can easily feed inputs for downstream SIMD steps - it is needed to check if -upstream non-SIMD block yielded non-empty results (as SIMD block may not accept empty results). This check was added. -**No action needed** for existing blocks, but this fix may fix previously broken Workflows. 
+ As previously mentioned, adding `video_metadata` as an optional field to the internal representation of + [`image` kind](/workflows/kinds/image.md) (`WorkflowImageData` class) + may introduce some friction between existing blocks that output the [`image` kind](/workflows/kinds/image.md) and + future video processing blocks that rely on `video_metadata` being part of `image` representation. + + The issue arises because, while we can provide **default** values for `video_metadata` in `image` without + explicitly copying them from the input, any non-default metadata that was added upstream may be lost. + This can lead to downstream blocks that depend on the `video_metadata` not functioning as expected. + We've updated all existing `roboflow_core` blocks to account for this, but blocks created before this change in + external repositories may cause issues in workflows where their output images are used by video processing blocks. -## Execution Engine `v1.5.0` | inference `v0.38.0` -!!! Note "Change does not require any action" - - This change does not require any change from Workflows users. This is just performance optimisation. +* While the deprecated [`video_metadata` kind](/workflows/kinds/video_metadata.md) is still available for use, it will be fully removed in +Execution Engine version `v2.0.0`. -* Exposed new parameter in the init method of `BaseExecutionEngine` class - `executor` which can accept instance of -Python `ThreadPoolExecutor` to be used by execution engine. Thanks to this change, processing should be faster, as -each `BaseExecutionEngine.run(...)` will not require dedicated instance of `ThreadPoolExecutor` as it was so far. -Additionally, we are significantly limiting threads spawning which may also be a benefit in some installations. +!!! 
warning "Breaking change planned - Execution Engine `v2.0.0`" + + [`video_metadata` kind](/workflows/kinds/video_metadata.md) got deprecated and will be removed in `v2.0.0` + + +* As a result of the changes mentioned above, the internal representation of the [`image` kind](/workflows/kinds/image.md) has been updated to +include a new `video_metadata` property. This property can be optionally set in the constructor; if not provided, +a default value with reasonable defaults will be used. To simplify metadata manipulation within blocks, we have +introduced two new class methods: `WorkflowImageData.copy_and_replace(...)` and `WorkflowImageData.create_crop(...)`. +For more details, refer to the updated [`WorkflowImageData` usage guide](/workflows/internal_data_types.md#workflowimagedata). -* Despite the change, Execution Engine maintains the limit of concurrently executed steps - by limiting the number of -steps that run through the executor at a time (since Execution Engine is no longer in control of `ThreadPoolExecutor` -creation, and it is possible for the pool to have more workers available). -??? Hint "How to inject `ThreadPoolExecutor` to Execution Engine?" - - ```python - from concurrent.futures import ThreadPoolExecutor - workflow_init_parameters = { ... } - with ThreadPoolExecutor(max_workers=...) 
as thread_pool_executor: - execution_engine = ExecutionEngine.init( - init_parameters=workflow_init_parameters, - max_concurrent_steps=4, - workflow_id="your-workflow-id", - executor=thread_pool_executor, - ) - runtime_parameters = { - "image": cv2.imread("your-image-path") - } - results = execution_engine.run(runtime_parameters=runtime_parameters) - ``` From 714d88cdd217d04c5d5c7235e87efbcb1837e230 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Mon, 25 Aug 2025 11:31:42 +0200 Subject: [PATCH 15/20] Add first part of changelog --- docs/workflows/execution_engine_changelog.md | 86 ++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/docs/workflows/execution_engine_changelog.md b/docs/workflows/execution_engine_changelog.md index c5cc451940..031319d39f 100644 --- a/docs/workflows/execution_engine_changelog.md +++ b/docs/workflows/execution_engine_changelog.md @@ -2,6 +2,92 @@ Below you can find the changelog for Execution Engine. +## Execution Engine `v1.6.0` | inference `v0.53.0` + +!!! Note "Change may require attention" + + This release introduces upgrades and new features with **no changes required** to existing workflows. + Some blocks may need to be upgraded to take advantage of the latest Execution Engine capabilities. + +Prior versions of the Execution Engine had significant limitations when interacting with certain types of +blocks - specifically those operating in Single Instruction, Multiple Data (SIMD) mode. These blocks are designed to +process batches of inputs at once, apply the same operation to each element, and return results for the entire batch. + +For example, the `run(...)` method of such a block might look like: + +```python +def run(self, image: Batch[WorkflowImageData], confidence: float): + pass +``` + +In the manifest, the `image` field is declared as accepting batches. + +The issue arose when the input image came from a block that did not operate on batches. 
In such cases, the +Execution Engine was unable to construct a batch from individual images, which often resulted in frustrating +compilation errors such as: + +``` +Detected invalid reference plugged into property `images` of step `$steps.model` - the step property +strictly requires batch-oriented inputs, yet the input selector holds non-batch oriented input - this indicates +the problem with construction of your Workflow - usually the problem occurs when non-batch oriented step inputs are +filled with outputs of non batch-oriented steps or non batch-oriented inputs. +``` + +In Execution Engine `v1.6.0`, this limitation has been removed, introducing the following behaviour: + +* When it is detected that a given input must be batch-oriented, a procedure called **Auto Batch Casting** is applied. +This automatically converts the input into a `Batch[T]`. Since all batch-mode inputs were already explicitly denoted in +manifests, most blocks (with exceptions noted below) benefit from this upgrade without requiring any internal changes. + +* The dimensionality (level of nesting) of an auto-batch cast parameter is determined at compilation time, based on the +context of the specific block in the workflow as well as its manifest. If other batch-oriented inputs are present +(referred to as *lineage supports*), the Execution Engine uses them as references when constructing auto-casted +batches. This ensures that the number of elements in each batch dimension matches the other data fed into the step +(simulating what would have been asserted if an actual batch input had been provided). If there are no +*lineage supports*, or if the block manifest requires it (e.g. input dimensionality offset is set), the missing +dimensions are generated similarly to the +[`torch.unsqueeze(...)` operation](https://docs.pytorch.org/docs/stable/generated/torch.unsqueeze.html). + +* Step outputs are then evaluated against the presence of an Auto Batch Casting context. 
Based on the evaluation, +outputs are saved either as batches or as scalars, ensuring that the effect of casting remains local, with the only +exception being output dimensionality changes introduced by the block itself. As a side effect, it is now possible to: + + * **create output batches from scalars** (when the step increases dimensionality), and + + * **collapse batches into scalars** (when the block decreases dimensionality). + +* The only potential friction point arises **when a block that does not accept batches** (and thus does not denote +batch-accepting inputs) **decreases output dimensionality**. In previous versions, the Execution Engine handled this by +applying dimensionality wrapping: all batch-oriented inputs were wrapped with an additional `Batch[T]` dimension, +allowing the block’s `run(...)` method to perform reduce operations across the list dimension. With Auto Batch Casting, +however, such blocks no longer provide the Execution Engine with a clear signal about whether certain inputs are +scalars or batches, making casting nondeterministic. To address this, a new manifest method was introduced: +`get_parameters_enforcing_auto_batch_casting(...)`. This method must return the list of parameters for which batch +casting should be enforced when dimensionality is decreased. It is not expected to be used in any other context. + +* In earlier versions, a hard constraint existed: dimensionality collapse could only occur at levels ≥ 2 (i.e. only +on nested batches). This limitation is now removed. Dimensionality collapse blocks may also operate on scalars, with +the output dimensionality “bouncing off” the zero ground. + +* There is one **important limitation** uncovered by these changes. Since Auto Batch Casting allows scalars to be +converted into batches (when a scalar is fed into a block that increases dimensionality), it is possible to end up with +multiple batches at the first nesting level, each with a different origin (lineage). 
In this case, the current +Execution Engine implementation cannot deterministically construct the output. Previous versions assumed that outputs +were always lists of elements, with the order determined by the input batch. With dynamically generated batches, +this assumption no longer holds. Fixing this design flaw would require a breaking change for all customers, +so it is deferred to **Execution Engine v2.0**. For now, an assertion has been introduced in the code, raising the +following error: + +``` +Workflow Compiler detected that the workflow contains multiple elements which create +top-level data batches - for instance inputs and blocks that create batched outputs from +scalar parameters. We know it sounds convoluted, but the bottom line is that this +situation is known limitation of Workflows Compiler. +Contact Roboflow team through github issues (https://github.com/roboflow/inference/issues) +providing full context of the problem - including workflow definition you use. +``` + + ## Execution Engine `v1.5.0` | inference `v0.38.0` !!! Note "Change does not require any action" From 2eb0ba3da7a447b1f592048f12de9929c9a6a970 Mon Sep 17 00:00:00 2001 From: "codeflash-ai[bot]" <148906541+codeflash-ai[bot]@users.noreply.github.com> Date: Mon, 25 Aug 2025 10:24:22 +0000 Subject: [PATCH 16/20] =?UTF-8?q?=E2=9A=A1=EF=B8=8F=20Speed=20up=20functio?= =?UTF-8?q?n=20`construct=5Fsimd=5Fstep=5Finput`=20by=2037%=20in=20PR=20#1?= =?UTF-8?q?504=20(`feature/try-to-beat-the-limitation-of-ee-in-terms-of-si?= =?UTF-8?q?ngular-elements-pushed-into-batch-inputs`)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The optimized code achieves a **36% speedup** through a single but impactful conditional check optimization in the `prepare_parameters` function. 
**Key Optimization:** The main performance improvement comes from adding an `if empty_indices:` check before executing expensive list comprehension and data removal operations: ```python # Original: Always executes these expensive operations indices = [e for e in indices if e not in empty_indices] result = remove_indices(value=result, indices=empty_indices) # Optimized: Only executes when empty_indices is non-empty if empty_indices: indices = [e for e in indices if e not in empty_indices] result = remove_indices(value=result, indices=empty_indices) ``` **Why this optimization works:** - In many test cases, `empty_indices` is an empty set, making the filtering operations unnecessary - The list comprehension `[e for e in indices if e not in empty_indices]` has O(n*m) complexity where n=len(indices) and m=len(empty_indices) - `remove_indices()` recursively processes nested data structures, which is expensive even for empty removal sets - By avoiding these operations when `empty_indices` is empty, we eliminate significant computational overhead **Performance impact by test case type:** - **Large batch inputs** see the biggest gains (43-107% faster) because they avoid expensive O(n) operations on large datasets when no filtering is needed - **Basic test cases** show consistent 15-25% improvements from avoiding unnecessary operations - **Edge cases with actual empty elements** may see minimal or slightly negative impact (0.5% slower) due to the additional conditional check, but this is negligible compared to the gains in common cases This optimization is particularly effective because most workflow executions don't have empty batch elements that need filtering, making the conditional check a highly beneficial guard against unnecessary work. 
--- .../executor/execution_data_manager/step_input_assembler.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py index 89fc07427c..05faf9277c 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py @@ -431,8 +431,9 @@ def prepare_parameters( parameters={}, ) empty_indices = get_empty_batch_elements_indices(value=result) - indices = [e for e in indices if e not in empty_indices] - result = remove_indices(value=result, indices=empty_indices) + if empty_indices: + indices = [e for e in indices if e not in empty_indices] + result = remove_indices(value=result, indices=empty_indices) return BatchModeSIMDStepInput( indices=indices, parameters=result, From 831583a0dc8cb8f9f86afe76efa87eee7165a508 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Mon, 25 Aug 2025 16:31:59 +0200 Subject: [PATCH 17/20] Clarify docs and fix issue with input parameters not being broadcast properly --- docs/workflows/create_workflow_block.md | 26 +++-- docs/workflows/execution_engine_changelog.md | 99 ++++++++++++++++++- docs/workflows/workflow_execution.md | 8 ++ docs/workflows/workflows_execution_engine.md | 29 ++++++ inference/core/version.py | 2 +- .../fusion/dimension_collapse/v1.py | 4 + .../introspection/schema_parser.py | 5 +- .../v1/compiler/graph_constructor.py | 21 +++- .../v1/dynamic_blocks/block_assembler.py | 4 +- .../v1/dynamic_blocks/entities.py | 7 ++ .../step_input_assembler.py | 25 ++++- .../plugin_image_producer/__init__.py | 18 +++- 12 files changed, 228 insertions(+), 20 deletions(-) diff --git a/docs/workflows/create_workflow_block.md 
b/docs/workflows/create_workflow_block.md index 1fc1d2b6c3..c09b72c5a5 100644 --- a/docs/workflows/create_workflow_block.md +++ b/docs/workflows/create_workflow_block.md @@ -1528,7 +1528,7 @@ the method signatures. In this example, the block visualises crops predictions and creates tiles presenting all crops predictions in single output image. - ```{ .py linenums="1" hl_lines="29-31 48-49 59-60"} + ```{ .py linenums="1" hl_lines="30-32 34-36 53-55 65-66"} from typing import List, Literal, Type, Union import supervision as sv @@ -1556,10 +1556,15 @@ the method signatures. crops_predictions: Selector( kind=[OBJECT_DETECTION_PREDICTION_KIND] ) + scalar_parameter: Union[float, Selector()] @classmethod def get_output_dimensionality_offset(cls) -> int: return -1 + + @classmethod + def get_parameters_enforcing_auto_batch_casting(cls) -> List[str]: + return ["crops", "crops_predictions"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: @@ -1578,6 +1583,7 @@ the method signatures. self, crops: Batch[WorkflowImageData], crops_predictions: Batch[sv.Detections], + scalar_parameter: float, ) -> BlockResult: annotator = sv.BoxAnnotator() visualisations = [] @@ -1591,18 +1597,22 @@ the method signatures. return {"visualisations": tile} ``` - * in lines `29-31` manifest class declares output dimensionality + * in lines `30-32` manifest class declares output dimensionality offset - value `-1` should be understood as decreasing dimensionality level by `1` - * in lines `48-49` you can see the impact of output dimensionality decrease - on the method signature. Both inputs are artificially wrapped in `Batch[]` container. - This is done by Execution Engine automatically on output dimensionality decrease when - all inputs have the same dimensionality to enable access to all elements occupying - the last dimensionality level. 
Obviously, only elements related to the same element + * in lines `34-36` manifest class declares `run(...)` method inputs that will be subject to auto-batch casting + ensuring that the signature is always stable. Auto-batch casting was introduced in Execution Engine `v1.6.0` + - refer to [changelog](./execution_engine_changelog.md) for more details. + + * in lines `53-55` you can see the impact of output dimensionality decrease + on the method signature. First two inputs (declared in line `36`) are artificially wrapped in `Batch[]` + container, whereas `scalar_parameter` remains primitive type. This is done by Execution Engine automatically + on output dimensionality decrease when all inputs have the same dimensionality to enable access to + all elements occupying the last dimensionality level. Obviously, only elements related to the same element from top-level batch will be grouped. For instance, if you had two input images that you cropped - crops from those two different images will be grouped separately. - * lines `59-60` illustrate how output is constructed - single value is returned and that value + * lines `65-66` illustrate how output is constructed - single value is returned and that value will be indexed by Execution Engine in output batch with reduced dimensionality === "different input dimensionalities" diff --git a/docs/workflows/execution_engine_changelog.md b/docs/workflows/execution_engine_changelog.md index 031319d39f..f1e2419b1b 100644 --- a/docs/workflows/execution_engine_changelog.md +++ b/docs/workflows/execution_engine_changelog.md @@ -56,7 +56,7 @@ exception being output dimensionality changes introduced by the block itself. As * **collapse batches into scalars** (when the block decreases dimensionality). 
-* The only potential friction point arises **when a block that does not accept batches** (and thus does not denote +* Two potential friction points arise - the first **when a block that does not accept batches** (and thus does not denote batch-accepting inputs) **decreases output dimensionality**. In previous versions, the Execution Engine handled this by applying dimensionality wrapping: all batch-oriented inputs were wrapped with an additional `Batch[T]` dimension, allowing the block’s `run(...)` method to perform reduce operations across the list dimension. With Auto Batch Casting, @@ -65,6 +65,22 @@ scalars or batches, making casting nondeterministic. To address this, a new mani `get_parameters_enforcing_auto_batch_casting(...)`. This method must return the list of parameters for which batch casting should be enforced when dimensionality is decreased. It is not expected to be used in any other context. +!!! warning "Impact of new method on existing blocks" + + The requirement of defining `get_parameters_enforcing_auto_batch_casting(...)` method to fully use + Auto Batch Casting feature in the case described above is non-strict. If the block will not be changed, + the only effect will be that workflows which were **previously failing** with compilation error may + work or fail with **runtime error**, dependent on the details of block `run(...)` method implementation. + +* The second friction point arises when there is a block declaring input fields supporting batches and scalars using +`get_parameters_accepting_batches_and_scalars(...)` - by default, Execution Engine will skip auto-casting for such +parameters, as the method was historically **always a way to declare that block itself has ability to broadcast scalars +into batches** - see +[implementation of `roboflow_core/detections_transformation@v1`](/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py) +block. 
In a way, Auto Batch Casting is *redundant* for those blocks - so we propose leaving them as is and +upgrade to use `get_parameters_enforcing_auto_batch_casting(...)` instead of +`get_parameters_accepting_batches_and_scalars(...)` in new versions of such blocks. + * In earlier versions, a hard constraint existed: dimensionality collapse could only occur at levels ≥ 2 (i.e. only on nested batches). This limitation is now removed. Dimensionality collapse blocks may also operate on scalars, with the output dimensionality “bouncing off” the zero ground. @@ -86,7 +102,88 @@ situation is known limitation of Workflows Compiler. Contact Roboflow team through github issues (https://github.com/roboflow/inference/issues) providing full context of the problem - including workflow definition you use. ``` +### Migration guide + +??? Hint "Adding `get_parameters_enforcing_auto_batch_casting(...)` method" + + Blocks which decrease output dimensionality and do not define batch-oriented inputs needs to + declare all inputs which implementation expects to have wrapped in `Batch[T]` with the new class + method of block manifest called `get_parameters_enforcing_auto_batch_casting(...)` + + ```{ .py linenums="1" hl_lines="34-36 53-54"} + from typing import List, Literal, Type, Union + + import supervision as sv + + from inference.core.workflows.execution_engine.entities.base import ( + Batch, + OutputDefinition, + WorkflowImageData, + ) + from inference.core.workflows.execution_engine.entities.types import ( + IMAGE_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + Selector, + ) + from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, + ) + + + class BlockManifest(WorkflowBlockManifest): + type: Literal["my_plugin/tile_detections@v1"] + crops: Selector(kind=[IMAGE_KIND]) + crops_predictions: Selector( + kind=[OBJECT_DETECTION_PREDICTION_KIND] + ) + scalar_parameter: Union[float, Selector()] + + @classmethod + def 
get_output_dimensionality_offset(cls) -> int: + return -1 + + @classmethod + def get_parameters_enforcing_auto_batch_casting(cls) -> List[str]: + return ["crops", "crops_predictions"] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name="visualisations", kind=[IMAGE_KIND]), + ] + + + class TileDetectionsBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlockManifest + + def run( + self, + crops: Batch[WorkflowImageData], + crops_predictions: Batch[sv.Detections], + scalar_parameter: float, + ) -> BlockResult: + print("This is parameter which will not be auto-batch cast!", scalar_parameter) + annotator = sv.BoxAnnotator() + visualisations = [] + for image, prediction in zip(crops, crops_predictions): + annotated_image = annotator.annotate( + image.numpy_image.copy(), + prediction, + ) + visualisations.append(annotated_image) + tile = sv.create_tiles(visualisations) + return {"visualisations": tile} + ``` + + * in lines `34-36` one needs to add declaration of fields that will be subject to enforced auto-batch casting + * as a result of the above, input parameters of run method (lines `53-54`) will be wrapped into `Batch[T]` by + Execution Engine. ## Execution Engine `v1.5.0` | inference `v0.38.0` diff --git a/docs/workflows/workflow_execution.md b/docs/workflows/workflow_execution.md index 927c8ec39b..d3ca87347d 100644 --- a/docs/workflows/workflow_execution.md +++ b/docs/workflows/workflow_execution.md @@ -124,6 +124,14 @@ influencing the processing for all elements in the batch and this type of data w the reference images remain unchanged as you process each input. Thus, the reference images are considered *scalar* data, while the list of input images is *batch-oriented*. 
+ **Great news!** + + Since Execution Engine `v1.6.0`, the practical aspects of dealing with *scalars* and *batches* are offloaded to + the Execution Engine (refer to [changelog](./execution_engine_changelog.md) for more details). As a block + developer, it is still important to understand the difference, but when building blocks you are not forced to + think about the nuances that much. + + To illustrate the distinction, Workflow definitions hold inputs of the two categories: - **Scalar inputs** - like `WorkflowParameter` diff --git a/docs/workflows/workflows_execution_engine.md b/docs/workflows/workflows_execution_engine.md index d0fe5f343c..7ba6a60fa3 100644 --- a/docs/workflows/workflows_execution_engine.md +++ b/docs/workflows/workflows_execution_engine.md @@ -86,6 +86,35 @@ batch-oriented input, it will be treated as a SIMD step. Non-SIMD steps, by contrast, are expected to deliver a single result for the input data. In the case of non-SIMD flow-control steps, they affect all downstream steps as a whole, rather than individually for each element in a batch. +Historically, Execution Engine could not handle well all scenarios when non-SIMD steps' outputs were fed into SIMD steps +inputs - causing compilation error due to lack of ability to automatically cast such outputs into batches when feeding +into SIMD steps. Starting with Execution Engine `v1.6.0`, the handling of SIMD and non-SIMD blocks has been improved +through the introduction of **Auto Batch Casting**: + +* When a SIMD input is detected but receives scalar data, the Execution Engine automatically casts it into a batch. + +* The dimensionality of the batch is determined at compile time, using *lineage* information from other +batch-oriented inputs when available. Missing dimensions are generated in a manner similar to `torch.unsqueeze(...)`. 
+ +* Outputs are evaluated against the casting context - leaving them as scalars when the block keeps or decreases output +dimensionality or **creating new batches** when increase of dimensionality is expected. + +!!! warning "We don't support multiple sources of batch-oriented data" + + While Auto Batch Casting simplifies mixing SIMD and non-SIMD blocks, there is one major limitation to be aware of. + + If multiple first-level batches are created from different origins (for instance inputs and steps taking scalars + and raising output dimensionality into batch at first level of depth), the Execution Engine cannot deterministically + construct the output. In previous versions, the assumption was that **outputs were lists directly tied to inputs + batch order**. With Auto Batch Casting, batches may also be generated dynamically, and no deterministic ordering + can be guaranteed (imagine scenario when you feed batch of 4 images, and there is a block generating dynamic batch + with 3 images - when results are to be returned, Execution Engine is unable to determine a single input batch which + would dictate output order alignment, which is a hard requirement caused by faulty design choices). + + To prevent unpredictable behaviour, the Execution Engine asserts in this scenario and raises an error instead of + proceeding. 
Resolving this design flaw requires breaking changes and is therefore deferred to + **Execution Engine v2.0.** + ### Preparing step inputs diff --git a/inference/core/version.py b/inference/core/version.py index 550f8fa573..ab9744ad8f 100644 --- a/inference/core/version.py +++ b/inference/core/version.py @@ -1,4 +1,4 @@ -__version__ = "0.52.1" +__version__ = "0.53.0" if __name__ == "__main__": diff --git a/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py b/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py index 7f6e60d669..3f4c8512bc 100644 --- a/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py +++ b/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py @@ -59,6 +59,10 @@ def get_output_dimensionality_offset( ) -> int: return -1 + @classmethod + def get_parameters_enforcing_auto_batch_casting(cls) -> List[str]: + return ["data"] + @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ diff --git a/inference/core/workflows/execution_engine/introspection/schema_parser.py b/inference/core/workflows/execution_engine/introspection/schema_parser.py index 256a85133b..2dc7b40b8c 100644 --- a/inference/core/workflows/execution_engine/introspection/schema_parser.py +++ b/inference/core/workflows/execution_engine/introspection/schema_parser.py @@ -334,7 +334,10 @@ def retrieve_selectors_from_simple_property( ) if declared_points_to_batch == "dynamic": if property_name in inputs_accepting_batches_and_scalars: - points_to_batch = {True, False} + if property_name in inputs_enforcing_auto_batch_casting: + points_to_batch = {True} + else: + points_to_batch = {True, False} else: points_to_batch = { property_name in inputs_accepting_batches diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index 3c9400ad7c..499cb9f619 100644 --- 
a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -1577,18 +1577,31 @@ def verify_declared_batch_compatibility_against_actual_inputs( batch_compatibility_of_properties: Dict[str, Set[bool]], ) -> Set[str]: scalar_parameters_to_be_batched = set() + parameters_accepting_batches_and_scalars = set( + step_node_data.step_manifest.get_parameters_accepting_batches_and_scalars() + ) + hardcoded_inputs_to_be_batch_compatible = set( + step_node_data.step_manifest.get_parameters_enforcing_auto_batch_casting() + + step_node_data.step_manifest.get_parameters_accepting_batches() + ) for property_name, input_definition in input_data.items(): if property_name not in batch_compatibility_of_properties: - # only values plugged via selectors are to be validated - continue - if input_definition.is_compound_input(): + actual_input_is_batch = {False} + if property_name in parameters_accepting_batches_and_scalars: + batch_compatibility = {True, False} + elif property_name in hardcoded_inputs_to_be_batch_compatible: + batch_compatibility = {True} + else: + continue + elif input_definition.is_compound_input(): actual_input_is_batch = { element.is_batch_oriented() for element in input_definition.iterate_through_definitions() } + batch_compatibility = batch_compatibility_of_properties[property_name] else: actual_input_is_batch = {input_definition.is_batch_oriented()} - batch_compatibility = batch_compatibility_of_properties[property_name] + batch_compatibility = batch_compatibility_of_properties[property_name] step_accepts_batch_input = step_node_data.step_manifest.accepts_batch_input() if ( step_accepts_batch_input diff --git a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py index bcf4e88814..e8702a71cf 100644 --- 
a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py +++ b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py @@ -381,7 +381,9 @@ def assembly_manifest_class_methods( "get_parameters_accepting_batches_and_scalars", classmethod(get_parameters_accepting_batches_and_scalars), ) - get_parameters_enforcing_auto_batch_casting = lambda cls: list() + get_parameters_enforcing_auto_batch_casting = ( + lambda cls: manifest_description.get_parameters_enforcing_auto_batch_casting + ) setattr( manifest_class, "get_parameters_enforcing_auto_batch_casting", diff --git a/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py b/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py index 6e6e6a72fe..7fbb4d2c4d 100644 --- a/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py +++ b/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py @@ -116,6 +116,13 @@ class ManifestDescription(BaseModel): "Value will override `accepts_batch_input` if non-empty " "list is provided, `accepts_batch_input` is kept not to break backward compatibility.", ) + get_parameters_enforcing_auto_batch_casting: List[str] = Field( + default_factory=list, + description="List of parameters, for which auto-batch casting should be enforced, making sure that the block " + "run(...) method will always receive the parameters as batches, not scalars. 
This property is important for " + "blocks decreasing output dimensionality which do not define neither `batch_oriented_parameters` nor " + "`parameters_with_scalars_and_batches`.", + ) class PythonCode(BaseModel): diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py index 89fc07427c..990943cd82 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py @@ -577,7 +577,30 @@ def get_non_compound_parameter_value( ) else: static_input: StaticStepInputDefinition = parameter # type: ignore - return static_input.value, None, False + if not requested_as_batch or static_input.value is None: + # when we have Optional[Selector()] in manifest - we must retain + # ability to inject None into the run(...) 
parameters - as + # if we treat that as actual batch and broadcast Batch[None], + # we would behave exactly as condition execution does - + # and the logic executing after this, will filter-out empty + # elements - so on None, we behave "the old way" regardless of the fact that ABC + # was requested + return static_input.value, None, False + else: + return apply_auto_batch_casting( + parameter_name=parameter.parameter_specification.parameter_name, + value=static_input.value, + auto_batch_casting_config=auto_batch_casting_lineage_supports[ + parameter.parameter_specification.parameter_name + ], + contains_empty_scalar_step_output_selector=False, + dynamic_batches_manager=dynamic_batches_manager, + step_execution_dimensionality=step_execution_dimensionality, + guard_of_indices_wrapping=guard_of_indices_wrapping, + step_requests_batch_input=step_requests_batch_input, + masks=masks, + scalars_discarded=False, + ) dynamic_parameter: DynamicStepInputDefinition = parameter # type: ignore parameter_dimensionality = dynamic_parameter.get_dimensionality() lineage_indices = dynamic_batches_manager.get_indices_for_data_lineage( diff --git a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py index 94080364aa..c9e645ec3d 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py @@ -222,7 +222,11 @@ def get_execution_engine_compatibility(cls) -> Optional[str]: return ">=1.3.0,<2.0.0" @classmethod - def get_parameters_accepting_batches(cls) -> List[str]: + def get_parameters_accepting_batches_and_scalars(cls) -> List[str]: + return ["images_x", "images_y"] + + @classmethod + def get_parameters_enforcing_auto_batch_casting(cls) -> List[str]: return ["images_x", "images_y"] @classmethod @@ -897,6 +901,10 @@ 
class AlwaysPassManifest(WorkflowBlockManifest): def describe_outputs(cls) -> List[OutputDefinition]: return [] + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["x"] + @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: return ">=1.3.0,<2.0.0" @@ -907,8 +915,12 @@ class AlwaysPassBlock(WorkflowBlock): def get_manifest(cls) -> Type[WorkflowBlockManifest]: return AlwaysPassManifest - def run(self, x: Any, next_steps: List[StepSelector]) -> BlockResult: - return FlowControl(mode="select_step", context=next_steps) + def run(self, x: Batch[Any], next_steps: List[StepSelector]) -> BlockResult: + assert isinstance(x, Batch) + results = [] + for _ in x: + results.append(FlowControl(mode="select_step", context=next_steps)) + return results class EachSecondPassManifest(WorkflowBlockManifest): From 86d878729b578de12c98a4d76d62ea8f6bd7953d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Tue, 26 Aug 2025 12:05:06 +0200 Subject: [PATCH 18/20] Introduce output nesting for emergent dimensions --- .../workflows/execution_engine/constants.py | 2 +- .../v1/compiler/graph_constructor.py | 19 +- .../v1/executor/output_constructor.py | 188 ++++++++++++++++-- .../models/test_owlv2_max_detections.py | 14 +- ...ilation_of_workflow_with_invalid_plugin.py | 9 +- ...ng_scalars_to_fit_into_batch_parameters.py | 72 ++++--- .../executor/test_output_constructor.py | 4 +- 7 files changed, 235 insertions(+), 73 deletions(-) diff --git a/inference/core/workflows/execution_engine/constants.py b/inference/core/workflows/execution_engine/constants.py index 8e93beda09..5b14289969 100644 --- a/inference/core/workflows/execution_engine/constants.py +++ b/inference/core/workflows/execution_engine/constants.py @@ -2,7 +2,7 @@ PARSED_NODE_INPUT_SELECTORS_PROPERTY = "parsed_node_input_selectors" STEP_DEFINITION_PROPERTY = "definition" WORKFLOW_INPUT_BATCH_LINEAGE_ID = "" -TOP_LEVEL_LINEAGE_KEY = "top_level_lineage" 
+TOP_LEVEL_LINEAGES_KEY = "top_level_lineages" IMAGE_TYPE_KEY = "type" IMAGE_VALUE_KEY = "value" ROOT_PARENT_ID_KEY = "root_parent_id" diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index 499cb9f619..c5822fee54 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -22,7 +22,7 @@ from inference.core.workflows.execution_engine.constants import ( NODE_COMPILATION_OUTPUT_PROPERTY, PARSED_NODE_INPUT_SELECTORS_PROPERTY, - TOP_LEVEL_LINEAGE_KEY, + TOP_LEVEL_LINEAGES_KEY, WORKFLOW_INPUT_BATCH_LINEAGE_ID, ) from inference.core.workflows.execution_engine.entities.base import ( @@ -616,22 +616,7 @@ def denote_data_flow_in_workflow( ), ) execution_graph.remove_node(super_input_node) - if len(top_level_data_lineage) > 1: - raise AssumptionError( - public_message=f"Workflow Compiler detected that the workflow contains multiple elements which create " - f"top-level data batches - for instance inputs and blocks that create batched outputs from " - f"scalar parameters. We know it sounds convoluted, but the bottom line is that this " - f"situation is known limitation of Workflows Compiler. 
" - f"Contact Roboflow team through github issues " - f"(https://github.com/roboflow/inference/issues) providing full " - f"context of the problem - including workflow definition you use.", - context="workflow_compilation | execution_graph_construction | verification_of_batches_sources", - ) - if len(top_level_data_lineage) > 0: - top_level_data_lineage_marker = top_level_data_lineage.pop() - else: - top_level_data_lineage_marker = None - execution_graph.graph[TOP_LEVEL_LINEAGE_KEY] = top_level_data_lineage_marker + execution_graph.graph[TOP_LEVEL_LINEAGES_KEY] = top_level_data_lineage return execution_graph diff --git a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py index 95724ab79c..e0bfff65bc 100644 --- a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py +++ b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py @@ -1,5 +1,6 @@ import traceback -from typing import Any, Callable, Dict, List, Optional, Union +from collections import defaultdict +from typing import Any, Callable, Dict, List, Optional, Set, Union import numpy as np import supervision as sv @@ -11,7 +12,7 @@ ) from inference.core.workflows.errors import AssumptionError, ExecutionEngineRuntimeError from inference.core.workflows.execution_engine.constants import ( - TOP_LEVEL_LINEAGE_KEY, + TOP_LEVEL_LINEAGES_KEY, WORKFLOW_INPUT_BATCH_LINEAGE_ID, ) from inference.core.workflows.execution_engine.entities.base import ( @@ -73,6 +74,21 @@ def construct_workflow_output( non_batch_outputs[output.name] = data_piece if not batch_oriented_outputs: return [non_batch_outputs] + outputs_for_generated_lineage = defaultdict(set) + outputs_for_input_dictated_lineage = set() + for output in workflow_outputs: + if output.name not in batch_oriented_outputs: + outputs_for_input_dictated_lineage.add(output.name) + continue + top_level_lineage = node_as( + 
execution_graph=execution_graph, + node=construct_output_selector(name=output.name), + expected_type=OutputNode, + ).data_lineage[0] + if top_level_lineage == WORKFLOW_INPUT_BATCH_LINEAGE_ID: + outputs_for_input_dictated_lineage.add(output.name) + else: + outputs_for_generated_lineage[top_level_lineage].add(output.name) dimensionality_for_output_nodes = { output.name: node_as( execution_graph=execution_graph, @@ -81,34 +97,71 @@ def construct_workflow_output( ).dimensionality for output in workflow_outputs } + results = create_outputs_for_input_induced_lineages( + output_name2indices=output_name2indices, + outputs_for_input_dictated_lineage=outputs_for_input_dictated_lineage, + workflow_outputs=workflow_outputs, + execution_data_manager=execution_data_manager, + serialize_results=serialize_results, + kinds_serializers=kinds_serializers, + kinds_of_output_nodes=kinds_of_output_nodes, + non_batch_outputs=non_batch_outputs, + dimensionality_for_output_nodes=dimensionality_for_output_nodes, + batch_oriented_outputs=batch_oriented_outputs, + ) + if len(results) == 0 and len(outputs_for_generated_lineage) > 0: + results.append({}) + for generated_lineage, outputs_names in outputs_for_generated_lineage.items(): + results_for_outputs_of_generated_lineage = ( + create_outputs_for_generated_lineage_outputs( + generated_lineage=generated_lineage, + output_name2indices=output_name2indices, + outputs_for_generated_lineage=outputs_names, + workflow_outputs=workflow_outputs, + execution_data_manager=execution_data_manager, + serialize_results=serialize_results, + kinds_serializers=kinds_serializers, + kinds_of_output_nodes=kinds_of_output_nodes, + dimensionality_for_output_nodes=dimensionality_for_output_nodes, + ) + ) + for output in results: + output.update(results_for_outputs_of_generated_lineage) + return results + + +def create_outputs_for_input_induced_lineages( + output_name2indices: Dict[str, Optional[List[tuple]]], + outputs_for_input_dictated_lineage: Set[str], + 
workflow_outputs: List[JsonField], + execution_data_manager: ExecutionDataManager, + serialize_results: bool, + kinds_serializers: Dict[str, Callable[[Any], Any]], + kinds_of_output_nodes: Dict[ + str, Union[List[Union[Kind, str]], Dict[str, List[Union[Kind, str]]]] + ], + non_batch_outputs: Dict[str, Any], + dimensionality_for_output_nodes: Dict[str, int], + batch_oriented_outputs: Set[str], +) -> List[Dict[str, Any]]: outputs_arrays: Dict[str, Optional[list]] = { name: create_array(indices=np.array(indices)) for name, indices in output_name2indices.items() - if name in batch_oriented_outputs + if name in outputs_for_input_dictated_lineage and name in batch_oriented_outputs + } + name2selector = { + output.name: output.selector + for output in workflow_outputs + if output.name in outputs_for_input_dictated_lineage } - name2selector = {output.name: output.selector for output in workflow_outputs} outputs_requested_in_parent_coordinates = { output.name for output in workflow_outputs if output.coordinates_system is CoordinatesSystem.PARENT } - top_level_data_lineage_marker = execution_graph.graph.get(TOP_LEVEL_LINEAGE_KEY) - if top_level_data_lineage_marker: - major_batch_size = len( - execution_data_manager.get_lineage_indices( - lineage=[top_level_data_lineage_marker] - ) - ) - if ( - major_batch_size == 0 - and top_level_data_lineage_marker != WORKFLOW_INPUT_BATCH_LINEAGE_ID - ): - # we had some dynamic dimensionality increase on top of auto-batch casting, but we - # failed to register indices due to conditional execution - major_batch_size = 1 - else: - major_batch_size = 0 - for name in batch_oriented_outputs: + for name in outputs_for_input_dictated_lineage: + if name not in batch_oriented_outputs: + continue array = outputs_arrays[name] indices = output_name2indices[name] data = execution_data_manager.get_batch_data( @@ -145,6 +198,14 @@ def construct_workflow_output( context="workflow_execution | output_construction", ) results = [] + if not outputs_arrays: + 
major_batch_size = 1 if len(non_batch_outputs) > 0 else 0 + else: + major_batch_size = len( + execution_data_manager.get_lineage_indices( + lineage=[WORKFLOW_INPUT_BATCH_LINEAGE_ID] + ) + ) for i in range(major_batch_size): single_result = {} for name, value in non_batch_outputs.items(): @@ -166,6 +227,91 @@ def construct_workflow_output( return results +def create_outputs_for_generated_lineage_outputs( + generated_lineage: str, + output_name2indices: Dict[str, Optional[List[tuple]]], + outputs_for_generated_lineage: Set[str], + workflow_outputs: List[JsonField], + execution_data_manager: ExecutionDataManager, + serialize_results: bool, + kinds_serializers: Dict[str, Callable[[Any], Any]], + kinds_of_output_nodes: Dict[ + str, Union[List[Union[Kind, str]], Dict[str, List[Union[Kind, str]]]] + ], + dimensionality_for_output_nodes: Dict[str, int], +) -> Dict[str, List[Any]]: + outputs_arrays: Dict[str, Optional[list]] = { + name: create_array(indices=np.array(indices)) + for name, indices in output_name2indices.items() + if name in outputs_for_generated_lineage + } + name2selector = { + output.name: output.selector + for output in workflow_outputs + if output.name in outputs_for_generated_lineage + } + outputs_requested_in_parent_coordinates = { + output.name + for output in workflow_outputs + if output.coordinates_system is CoordinatesSystem.PARENT + } + for name in outputs_for_generated_lineage: + array = outputs_arrays[name] + indices = output_name2indices[name] + data = execution_data_manager.get_batch_data( + selector=name2selector[name], + indices=indices, + ) + for index, data_piece in zip(indices, data): + if ( + name in outputs_requested_in_parent_coordinates + and data_contains_sv_detections(data=data_piece) + ): + data_piece = convert_sv_detections_coordinates(data=data_piece) + if serialize_results: + output_kind = kinds_of_output_nodes[name] + data_piece = serialize_data_piece( + output_name=name, + data_piece=data_piece, + kind=output_kind, + 
kinds_serializers=kinds_serializers, + ) + try: + place_data_in_array( + array=array, + index=index, + data=data_piece, + ) + except (TypeError, IndexError): + raise ExecutionEngineRuntimeError( + public_message=f"Could not produce output {name} die to mismatch in " + f"declared output dimensions versus actual ones." + f"This is most likely a bug. Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | output_construction", + ) + major_batch_size = len( + execution_data_manager.get_lineage_indices(lineage=[generated_lineage]) + ) + results = {name: [] for name in outputs_arrays} + for i in range(major_batch_size): + for name, array in outputs_arrays.items(): + if array is None or len(array) <= i: + level = dimensionality_for_output_nodes[name] - 1 + if level > 0: + element = create_empty_index_array( + level=level, + accumulator=[], + ) + else: + element = None + else: + element = array[i] + results[name].append(element) + return results + + def create_array(indices: np.ndarray) -> Optional[list]: if indices.size == 0: return None diff --git a/tests/inference/unit_tests/models/test_owlv2_max_detections.py b/tests/inference/unit_tests/models/test_owlv2_max_detections.py index 7b69eaab10..86296d43ed 100644 --- a/tests/inference/unit_tests/models/test_owlv2_max_detections.py +++ b/tests/inference/unit_tests/models/test_owlv2_max_detections.py @@ -12,7 +12,9 @@ def test_infer_from_embed_respects_max_detections(monkeypatch): dtype=torch.float32, ) image_class_embeds = torch.zeros((4, 2)) - model.get_image_embeds = MagicMock(return_value=(None, image_boxes, image_class_embeds, None, None)) + model.get_image_embeds = MagicMock( + return_value=(None, image_boxes, image_class_embeds, None, None) + ) def fake_get_class_preds_from_embeds(*args, **kwargs): boxes = image_boxes @@ -20,8 +22,14 @@ def 
fake_get_class_preds_from_embeds(*args, **kwargs): scores = torch.tensor([0.9, 0.8, 0.7, 0.6]) return boxes, classes, scores - monkeypatch.setattr(owlv2, "get_class_preds_from_embeds", fake_get_class_preds_from_embeds) - monkeypatch.setattr(owlv2.torchvision.ops, "nms", lambda boxes, scores, iou: torch.arange(boxes.shape[0])) + monkeypatch.setattr( + owlv2, "get_class_preds_from_embeds", fake_get_class_preds_from_embeds + ) + monkeypatch.setattr( + owlv2.torchvision.ops, + "nms", + lambda boxes, scores, iou: torch.arange(boxes.shape[0]), + ) query_embeddings = {"a": {"positive": torch.zeros((1, 2)), "negative": None}} predictions = model.infer_from_embed( diff --git a/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_plugin.py b/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_plugin.py index 2f69278dd3..f960d40f23 100644 --- a/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_plugin.py +++ b/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_plugin.py @@ -6,7 +6,7 @@ from inference.core.managers.base import ModelManager from inference.core.workflows.core_steps.common.entities import StepExecutionMode from inference.core.workflows.errors import BlockInterfaceError -from inference.core.workflows.execution_engine.constants import TOP_LEVEL_LINEAGE_KEY +from inference.core.workflows.execution_engine.constants import TOP_LEVEL_LINEAGES_KEY from inference.core.workflows.execution_engine.introspection import blocks_loader from inference.core.workflows.execution_engine.v1.compiler.core import compile_workflow @@ -145,10 +145,9 @@ def test_compilation_of_workflow_where_block_is_not_simd_but_defines_output_offs ) # then - assert ( - compiled_workflow.execution_graph.graph[TOP_LEVEL_LINEAGE_KEY] - == "$steps.problematic_dimensions" - ) + assert compiled_workflow.execution_graph.graph[TOP_LEVEL_LINEAGES_KEY] == { + 
"$steps.problematic_dimensions" + } WORKFLOW_WITH_INVALID_BLOCK_DECLARING_DIMENSIONALITY_REFERENCE_PROPERTY_AS_NON_BATCH = { diff --git a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py index 10d07d8227..338d42227f 100644 --- a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py +++ b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py @@ -518,7 +518,7 @@ def test_workflow_with_multiple_scalar_producers_feeding_non_simd_consumer_raisi result = execution_engine.run(runtime_parameters={}) # then - assert result == [{"shapes": "[192, 168, 3][220, 230, 3]"}] + assert result == [{"shapes": ["[192, 168, 3][220, 230, 3]"]}] WORKFLOW_WITH_NON_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT = { @@ -986,16 +986,37 @@ def test_workflow_with_batched_inputs_at_dim_1_fed_into_consumer_decreasing_the_ "workflows_core.api_key": None, "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_BATCH_INPUTS_AT_DIM_1_BOOSTING_DIM_AT_THE_END, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + image_1 = [ + np.zeros((192, 168, 3), dtype=np.uint8), + np.zeros((292, 168, 3), dtype=np.uint8), + ] + image_2 = [ + np.zeros((392, 168, 3), dtype=np.uint8), + np.zeros((492, 168, 3), dtype=np.uint8), + ] # when - with pytest.raises(AssumptionError): - # TESTING CURRENT LIMITATION OF EE - WE CANNOT HAVE A BLOCK THAT YIELDS NEW 1ST LEVEL - # OF DIMENSIONALITY (WHICH IS DICTATED BY INPUTS)! 
- _ = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_BATCH_INPUTS_AT_DIM_1_BOOSTING_DIM_AT_THE_END, - init_parameters=workflow_init_parameters, - max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, - ) + results = execution_engine.run( + { + "image_1": image_1, + "image_2": image_2, + } + ) + + # then + assert results == [ + { + "shapes": [ + "[192, 168, 3][392, 168, 3]\n[292, 168, 3][492, 168, 3]", + "[192, 168, 3][392, 168, 3]\n[292, 168, 3][492, 168, 3]", + ] + } + ] WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_BATCH_INPUTS_AT_DIM_2 = { @@ -1141,8 +1162,7 @@ def test_workflow_with_scalar_inputs_fed_into_consumer_decreasing_the_dimensiona # then assert results == [ - {"shapes": "[192, 168, 3][192, 168, 3]"}, - {"shapes": "[192, 168, 3][192, 168, 3]"}, + {"shapes": ["[192, 168, 3][192, 168, 3]", "[192, 168, 3][192, 168, 3]"]}, ] @@ -1212,17 +1232,19 @@ def test_workflow_with_non_simd_consumers_accepting_list_of_scalar_selector( results = execution_engine.run(runtime_parameters={}) # then - assert len(results) == 2, "Expected dim increase to happen" - assert [i.numpy_image.shape for i in results[0]["x"]] == [ + assert ( + len(results) == 1 + ), "Expected dim increase to happen, but should be nested according to how we treat emergent dimensions" + assert len(results[0]["x"]) == 2 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [ (100, 100, 3), (200, 200, 3), ] assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 3)] - assert [i.numpy_image.shape for i in results[1]["x"]] == [ + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [ (100, 100, 3), (200, 200, 3), ] - assert [i.numpy_image.shape for i in results[1]["y"]] == [(300, 300, 3)] WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_LIST_OF_BATCH_IMAGES = { @@ -2117,17 +2139,19 @@ def test_workflow_with_simd_consumers_accepting_list_of_scalar_selector( results = execution_engine.run(runtime_parameters={}) # then - 
assert len(results) == 2, "Expected dim increase to happen" - assert [i.numpy_image.shape for i in results[0]["x"]] == [ + assert ( + len(results) == 1 + ), "Expected dim increase to happen, but in artificially nested dim" + assert len(results[0]["x"]) == 2 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [ (100, 100, 3), (200, 200, 3), ] assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 3)] - assert [i.numpy_image.shape for i in results[1]["x"]] == [ + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [ (100, 100, 3), (200, 200, 3), ] - assert [i.numpy_image.shape for i in results[1]["y"]] == [(300, 300, 3)] WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_LIST_OF_BATCH_IMAGES = { @@ -3906,7 +3930,7 @@ def test_workflow_with_multiple_scalar_producers_feeding_non_simd_consumer_raisi result = execution_engine.run(runtime_parameters={}) # then - assert result == [{"shapes": None}] + assert result == [{"shapes": [None]}] WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS = { @@ -3970,7 +3994,7 @@ def test_workflow_with_multiple_scalar_producers_feeding_simd_consumer_raising_d result = execution_engine.run(runtime_parameters={}) # then - assert result == [{"shapes": "[192, 168, 3][220, 230, 3]"}] + assert result == [{"shapes": ["[192, 168, 3][220, 230, 3]"]}] WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_TWICE_FED_BY_SCALAR_PRODUCERS = { @@ -4040,7 +4064,7 @@ def test_workflow_with_multiple_scalar_producers_feeding_simd_consumer_raising_d # then assert result == [ - {"shapes": ["[192, 168, 3][220, 230, 3]", "[192, 168, 3][220, 230, 3]"]} + {"shapes": [["[192, 168, 3][220, 230, 3]", "[192, 168, 3][220, 230, 3]"]]} ] @@ -4111,7 +4135,7 @@ def test_workflow_with_multiple_scalar_producers_feeding_simd_consumer_raising_d result = execution_engine.run(runtime_parameters={}) # then - assert result == [{"shapes": None}] + assert result == [{"shapes": []}] 
WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS_AND_FLOW_CONTROL_AT_DIM_1 = { @@ -4186,7 +4210,7 @@ def test_workflow_with_multiple_scalar_producers_feeding_simd_consumer_raising_d result = execution_engine.run(runtime_parameters={}) # then - assert result == [{"shapes": None}] + assert result == [{"shapes": [None]}] WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT = { diff --git a/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py b/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py index d88c456f15..3e12217f89 100644 --- a/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py +++ b/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py @@ -9,7 +9,7 @@ from inference.core.workflows.core_steps.loader import KINDS_SERIALIZERS from inference.core.workflows.errors import AssumptionError, ExecutionEngineRuntimeError from inference.core.workflows.execution_engine.constants import ( - TOP_LEVEL_LINEAGE_KEY, + TOP_LEVEL_LINEAGES_KEY, WORKFLOW_INPUT_BATCH_LINEAGE_ID, ) from inference.core.workflows.execution_engine.entities.base import JsonField @@ -501,7 +501,7 @@ def test_construct_workflow_output_when_batch_outputs_present() -> None: data_lookup = { "$steps.other.c": "c_value", } - execution_graph.graph[TOP_LEVEL_LINEAGE_KEY] = WORKFLOW_INPUT_BATCH_LINEAGE_ID + execution_graph.graph[TOP_LEVEL_LINEAGES_KEY] = WORKFLOW_INPUT_BATCH_LINEAGE_ID def get_non_batch_data(selector: str) -> Any: return data_lookup[selector] From 852fe5464bc0fa688d4d5ca042134a0e1297367c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Tue, 26 Aug 2025 13:14:54 +0200 Subject: [PATCH 19/20] Add more tests and clarify docs --- docs/workflows/execution_engine_changelog.md | 46 +++-- docs/workflows/workflow_execution.md | 10 ++ docs/workflows/workflows_execution_engine.md | 18 +- 
ng_scalars_to_fit_into_batch_parameters.py | 159 ++++++++++++++++++ 4 files changed, 202 insertions(+), 31 deletions(-) diff --git a/docs/workflows/execution_engine_changelog.md b/docs/workflows/execution_engine_changelog.md index f1e2419b1b..b9c0f1ebac 100644 --- a/docs/workflows/execution_engine_changelog.md +++ b/docs/workflows/execution_engine_changelog.md @@ -85,23 +85,41 @@ upgrade to use `get_parameters_enforcing_auto_batch_casting(...)` instead of on nested batches). This limitation is now removed. Dimensionality collapse blocks may also operate on scalars, with the output dimensionality “bouncing off” the zero ground. -* There is one **important limitation** uncovered by these changes. Since Auto Batch Casting allows scalars to be -converted into batches (when a scalar is fed into a block that increases dimensionality), it is possible to end up with -multiple batches at the first nesting level, each with a different origin (lineage). In this case, the current -Execution Engine implementation cannot deterministically construct the output. Previous versions assumed that outputs -were always lists of elements, with the order determined by the input batch. With dynamically generated batches, -this assumption no longer holds. Fixing this design flaw would require a breaking change for all customers, -so it is deferred to **Execution Engine v2.0**. For now, an assertion has been introduced in the code, raising the -following error: + +There is one **key change in how outputs are built.** In earlier versions of the Execution Engine, a block was not allowed +to produce a `Batch[X]` directly at the first dimension level — that space was reserved for mapping onto input batches. +Starting with version `v1.6.0`, this restriction has been removed. + +Previously, outputs were always returned as a list of elements: + +* aligned with the input batches, or + +* a single-element list if only scalars were given as inputs. 
+ +This raised a question: what should happen if a block now produces a batch at the first dimension level? +We cannot simply `zip(...)` it with input-based outputs, since the size of these newly generated batches might not +match the number of input elements — making the operation ambiguous. + +To resolve this, we adopted the following rule: + +* Treat the situation as if there were a **"dummy" input batch of size 1**. + +* Consider all batches produced from scalar inputs as being one level deeper than they appear. + +* This follows the principle of broadcasting, allowing such outputs to expand consistently across all elements. + +* Input batch may vanish as a result of execution, but when this happens and a new first-level dimension emerges, it +is still going to be virtually nested to ensure output consistency. + +**Example:** ``` -Workflow Compiler detected that the workflow contains multiple elements which create -top-level data batches - for instance inputs and blocks that create batched outputs from -scalar parameters. We know it sounds convoluted, but the bottom line is that this -situation is known limitation of Workflows Compiler. -Contact Roboflow team through github issues (https://github.com/roboflow/inference/issues) -providing full context of the problem - including workflow definition you use. ``` + +(NO INPUTS) IMAGE FETCHER BLOCK --> image --> OD MODEL --> predictions --> CROPS --> output will be: ["crops": [, , ...]] ``` + +It is important to note that **results generated from previously created valid workflows will be the same** and the +change will only affect new workflows created to utilise new functionalities. + ### Migration guide ??? 
Hint "Adding `get_parameters_enforcing_auto_batch_casting(...)` method" diff --git a/docs/workflows/workflow_execution.md b/docs/workflows/workflow_execution.md index d3ca87347d..daf962807d 100644 --- a/docs/workflows/workflow_execution.md +++ b/docs/workflows/workflow_execution.md @@ -364,6 +364,16 @@ execution excludes steps at higher `dimensionality levels` from producing output output field selecting that values will be presented as nested list of empty lists, with depth matching `dimensionality level - 1` of referred output. +Since Execution Engine `v1.6.0`, blocks within a workflow may collapse batches into scalars, as well as create new +batches from scalar inputs. The first scenario is pretty easy to understand - each dictionary in the output list will +simply be populated with the same scalar value. The case of an *emergent* batch is slightly more complicated. +In such a case we can find a batch at dimensionality level 1, whose shape or element order is not compliant +with input batches. To prevent semantic ambiguity, we treat such a batch as if its dimensionality were one level higher +(as if **there is additional batch-oriented input of size one attached to the input of the block creating batch +dynamically**). Such virtually nested outputs are broadcast, such that each dictionary in the output list will be given +a new key with the same nested output. This nesting property is preserved even if there are no input-derived outputs +for a given workflow - in such a case, the output is a list of size 1 which contains a dictionary with the nested output. + +Some outputs would require serialisation when Workflows Execution Engine runs behind HTTP API. 
We use the following serialisation strategies: diff --git a/docs/workflows/workflows_execution_engine.md b/docs/workflows/workflows_execution_engine.md index 7ba6a60fa3..cc6b9931d9 100644 --- a/docs/workflows/workflows_execution_engine.md +++ b/docs/workflows/workflows_execution_engine.md @@ -86,7 +86,7 @@ batch-oriented input, it will be treated as a SIMD step. Non-SIMD steps, by contrast, are expected to deliver a single result for the input data. In the case of non-SIMD flow-control steps, they affect all downstream steps as a whole, rather than individually for each element in a batch. -Historically, Execution Engine could not handle well al scenarios when non-SIMD steps' outputs were fed into SIMD steps +Historically, Execution Engine could not handle well all scenarios when non-SIMD steps' outputs were fed into SIMD steps inputs - causing compilation error due to lack of ability to automatically cast such outputs into batches when feeding into SIMD seps. Starting with Execution Engine `v1.6.0`, the handling of SIMD and non-SIMD blocks has been improved through the introduction of **Auto Batch Casting**: @@ -99,22 +99,6 @@ batch-oriented inputs when available. Missing dimensions are generated in a mann * Outputs are evaluated against the casting context - leaving them as scalars when block keeps or decreases output dimensionality or **creating new batches** when increase of dimensionality is expected. -!!! warning "We don't support multiple sources of batch-oriented data" - - While Auto Batch Casting simplifies mixing SIMD and non-SIMD blocks, there is one major limitation to be aware of. - - If multiple first-level batches are created from different origins (for instance inputs and steps taking scalars - and raising output dimensionality into batch at first level of depth), the Execution Engine cannot deterministically - construct the output. In previous versions, the assumption was that **outputs were lists directly tied to inputs - batch order**. 
With Auto Batch Casting, batches may also be generated dynamically, and no deterministic ordering - can be guaranteed (imagine scenario when you feed batch of 4 images, and there is a block generating dynamic batch - with 3 images - when results are to be returned, Execution Engine is unable to determine a single input batch which - would dictate output order alignment, which is a hard requirement caused by falty design choices). - - To prevent unpredictable behaviour, the Execution Engine asserts in this scenario and raises an error instead of - proceeding. Resolving this design flaw requires breaking changes and is therefore deferred to - **Execution Engine v2.0.** - ### Preparing step inputs diff --git a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py index 338d42227f..81b383d78c 100644 --- a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py +++ b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py @@ -4345,3 +4345,162 @@ def test_workflow_with_batch_inputs_feeding_simd_consumer_raising_dim( {"shapes": ["[292, 168, 3][293, 168, 3]"]}, {"shapes": ["[392, 168, 3][393, 168, 3]"]}, ] + + +WORKFLOW_WITH_INPUTS_DERIVED_NESTED_DIMS_AND_EMERGED_NESTED_DIMS = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + ], + "steps": [ + { + "type": "ObjectDetectionModel", + "name": "general_detection", + "image": "$inputs.image", + "model_id": "yolov8n-640", + "class_filter": ["dog"], + }, + { + "type": "Crop", + "name": "cropping", + "image": "$inputs.image", + "predictions": "$steps.general_detection.predictions", + }, + { + "type": "EachSecondPass", + "name": "condition_batch_1", + "x": "$steps.cropping.crops", + "next_steps": ["$steps.breds_classification"], + }, + { + 
"type": "ClassificationModel", + "name": "breds_classification", + "image": "$steps.cropping.crops", + "model_id": "dog-breed-xpaq6/1", + "confidence": 0.09, + }, + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (192, 168, 3), + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (292, 168, 3), + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "MultiSIMDImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image", + }, + { + "type": "DimensionCollapse", + "name": "inputs_concatenation", + "data": "$inputs.image", + }, + { + "type": "DimensionCollapse", + "name": "outputs_concatenation", + "data": "$steps.image_consumer.shapes", + }, + { + "type": "DimensionCollapse", + "name": "outputs_concatenation_2", + "data": "$steps.outputs_concatenation.output", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "input_image", + "selector": "$inputs.image", + }, + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + { + "type": "JsonField", + "name": "collapsed_input", + "selector": "$steps.inputs_concatenation.output", + }, + { + "type": "JsonField", + "name": "collapsed_output", + "selector": "$steps.outputs_concatenation.output", + }, + { + "type": "JsonField", + "name": "collapsed_output_2", + "selector": "$steps.outputs_concatenation_2.output", + }, + { + "type": "JsonField", + "name": "breds_classification", + "selector": "$steps.breds_classification.predictions", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_input_derived_dims_and_emergent_dims( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, + dogs_image: np.ndarray, + crowd_image: np.ndarray, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + 
"tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_INPUTS_DERIVED_NESTED_DIMS_AND_EMERGED_NESTED_DIMS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image": [dogs_image, crowd_image], + } + ) + + # then + assert ( + len(result) == 2 + ), "Two inputs provided, their dimensions survived to the output, hence 2 outputs expected" + assert len(result[0]["collapsed_input"]) == 2 + assert np.allclose(result[0]["collapsed_input"][0].numpy_image, dogs_image) + assert np.allclose(result[0]["collapsed_input"][1].numpy_image, crowd_image) + assert np.allclose(result[0]["input_image"].numpy_image, dogs_image) + assert result[0]["shapes"] == ["[192, 168, 3][292, 168, 3]"] + assert result[0]["collapsed_output"] == ["[192, 168, 3][292, 168, 3]"] + assert result[0]["collapsed_output_2"] == [["[192, 168, 3][292, 168, 3]"]] + assert [ + e["top"] if e is not None else None for e in result[0]["breds_classification"] + ] == ["116.Parson_russell_terrier", None] + assert len(result[1]["collapsed_input"]) == 2 + assert np.allclose(result[1]["collapsed_input"][0].numpy_image, dogs_image) + assert np.allclose(result[1]["collapsed_input"][1].numpy_image, crowd_image) + assert np.allclose(result[1]["input_image"].numpy_image, crowd_image) + assert result[1]["shapes"] == ["[192, 168, 3][292, 168, 3]"] + assert result[1]["collapsed_output"] == ["[192, 168, 3][292, 168, 3]"] + assert result[1]["collapsed_output_2"] == [["[192, 168, 3][292, 168, 3]"]] + assert result[1]["breds_classification"] == [] From 8355698b4135c93d32a13289e0d0da73c4a0e07a Mon Sep 17 00:00:00 2001 
From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Tue, 26 Aug 2025 13:23:35 +0200 Subject: [PATCH 20/20] Add proper auth to integration tests --- ...t_plugins_enforcing_scalars_to_fit_into_batch_parameters.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py index 81b383d78c..7c2f4b9886 100644 --- a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py +++ b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py @@ -4457,6 +4457,7 @@ def test_workflow_with_input_derived_dims_and_emergent_dims( model_manager: ModelManager, dogs_image: np.ndarray, crowd_image: np.ndarray, + roboflow_api_key: str, ) -> None: # given get_plugin_modules_mock.return_value = [ @@ -4464,7 +4465,7 @@ def test_workflow_with_input_derived_dims_and_emergent_dims( ] workflow_init_parameters = { "workflows_core.model_manager": model_manager, - "workflows_core.api_key": None, + "workflows_core.api_key": roboflow_api_key, "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, }