Merge pull request #409 from roboflow/feature/sv_detections_in_workflows
`development` branch for new `workflows` release
PawelPeczek-Roboflow committed Jun 26, 2024
2 parents e2e1831 + aa0d92b commit d6cb89d
Showing 232 changed files with 23,718 additions and 7,468 deletions.
3 changes: 2 additions & 1 deletion .gitignore
@@ -165,5 +165,6 @@ scratch
!tests/inference/unit_tests/core/utils/assets/*.jpg
docs/workflows/blocks/*
docs/workflows/kinds/*
!tests/workflows/integration_tests/assets/*.jpg
!tests/workflows/integration_tests/execution/assets/*.jpg
!tests/workflows/integration_tests/execution/assets/rock_paper_scissors/*.jpg
!tests/workflows/unit_tests/core_steps/models/third_party/assets/*.png
21 changes: 10 additions & 11 deletions development/docs/build_block_docs.py
@@ -13,10 +13,10 @@
discover_blocks_connections,
)
from inference.core.workflows.execution_engine.introspection.entities import (
SelectorDefinition, BlockDescription,
SelectorDefinition, BlockDescription, BlockManifestMetadata,
)
from inference.core.workflows.execution_engine.introspection.schema_parser import (
parse_block_manifest_schema,
parse_block_manifest,
)
from inference.core.workflows.prototypes.block import WorkflowBlock

@@ -165,11 +165,12 @@ def main() -> None:
BLOCK_DOCUMENTATION_DIRECTORY, documentation_file_name
)
example_definition = generate_example_step_definition(block=block)
parsed_manifest = parse_block_manifest(manifest_type=block.manifest_class)
documentation_content = BLOCK_DOCUMENTATION_TEMPLATE.format(
class_name=block.manifest_type_identifier,
description=long_description,
block_inputs=format_block_inputs(block.block_schema),
block_input_bindings=format_input_bindings(block.block_schema),
block_inputs=format_block_inputs(parsed_manifest),
block_input_bindings=format_input_bindings(parsed_manifest),
block_output_bindings=format_block_outputs(block.outputs_manifest),
input_connections=format_block_connections(
connections=blocks_connections.input_connections.block_wise[
@@ -245,22 +246,20 @@ def camel_to_snake(name: str) -> str:
return name.lower()


def format_block_inputs(block_schema: dict) -> str:
parsed_schema = parse_block_manifest_schema(schema=block_schema)
def format_block_inputs(parsed_manifest: BlockManifestMetadata) -> str:
rows = []
for input_description in parsed_schema.primitive_types.values():
ref_appear = input_description.property_name in parsed_schema.selectors
for input_description in parsed_manifest.primitive_types.values():
ref_appear = input_description.property_name in parsed_manifest.selectors
rows.append(
f"| `{input_description.property_name}` | `{input_description.type_annotation}` | "
f"{input_description.property_description}. | {'✅' if ref_appear else '❌'} |"
)
return "\n".join(USER_CONFIGURATION_HEADER + rows)


def format_input_bindings(block_schema: dict) -> str:
parsed_schema = parse_block_manifest_schema(schema=block_schema)
def format_input_bindings(parsed_manifest: BlockManifestMetadata) -> str:
rows = []
for selector in parsed_schema.selectors.values():
for selector in parsed_manifest.selectors.values():
kinds_annotation = prepare_selector_kinds_annotation(selector=selector)
rows.append(
f" - `{selector.property_name}` (*{kinds_annotation}*): {selector.property_description}."
62 changes: 35 additions & 27 deletions development/stream_interface/workflows_demo.py
@@ -1,39 +1,42 @@
import os
from threading import Thread
from typing import List, Optional

import cv2
import supervision as sv

from inference import InferencePipeline
from inference.core.interfaces.camera.entities import VideoFrame
from inference.core.interfaces.stream.sinks import render_boxes
from inference.core.interfaces.stream.watchdog import PipelineWatchDog, BasePipelineWatchDog
from inference.core.utils.drawing import create_tiles

STOP = False
ANNOTATOR = sv.BoundingBoxAnnotator()


def main() -> None:
global STOP
watchdog = BasePipelineWatchDog()
workflow_specification = {
"specification": {
"version": "1.0",
"inputs": [
{"type": "WorkflowImage", "name": "image"},
],
"steps": [
{
"type": "ObjectDetectionModel",
"name": "step_1",
"image": "$inputs.image",
"model_id": "yolov8n-640",
"confidence": 0.5,
}
],
"outputs": [
{"type": "JsonField", "name": "predictions", "selector": "$steps.step_1.*"},
],
}
"version": "1.0",
"inputs": [
{"type": "WorkflowImage", "name": "image"},
],
"steps": [
{
"type": "ObjectDetectionModel",
"name": "step_1",
"image": "$inputs.image",
"model_id": "yolov8n-640",
"confidence": 0.5,
}
],
"outputs": [
{"type": "JsonField", "name": "predictions", "selector": "$steps.step_1.predictions"},
],
}
pipeline = InferencePipeline.init_with_workflow(
video_reference=os.environ["VIDEO_REFERENCE"],
video_reference=[os.environ["VIDEO_REFERENCE"]] * 2,
workflow_specification=workflow_specification,
watchdog=watchdog,
on_prediction=workflows_sink,
@@ -63,14 +66,19 @@ def command_thread(pipeline: InferencePipeline, watchdog: PipelineWatchDog) -> N


def workflows_sink(
predictions: dict,
video_frame: VideoFrame,
predictions: List[Optional[dict]],
video_frames: List[Optional[VideoFrame]],
) -> None:
render_boxes(
predictions["predictions"][0],
video_frame,
display_statistics=True,
)
images_to_show = []
for prediction, frame in zip(predictions, video_frames):
if prediction is None or frame is None:
continue
detections: sv.Detections = prediction["predictions"]
visualised = ANNOTATOR.annotate(frame.image.copy(), detections)
images_to_show.append(visualised)
tiles = create_tiles(images=images_to_show)
cv2.imshow("Predictions", tiles)
cv2.waitKey(1)


if __name__ == '__main__':
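With this change the sink receives `prediction["predictions"]` as a `supervision` `sv.Detections` object, so the standard supervision API applies directly. A minimal sketch, assuming `detections` is obtained as in the sink above, of filtering before annotation:

```python
# Minimal sketch, assuming `detections` is the sv.Detections taken from
# prediction["predictions"] as in the sink above.
import supervision as sv


def keep_confident(detections: sv.Detections, threshold: float = 0.6) -> sv.Detections:
    # sv.Detections supports numpy-style boolean indexing over its fields,
    # so high-confidence detections can be selected in one expression.
    return detections[detections.confidence > threshold]
```

The same indexing pattern works for class-based filtering, for example `detections[detections.class_id == 0]`.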
10 changes: 10 additions & 0 deletions docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1
@@ -33,6 +33,16 @@ COPY requirements/requirements.clip.txt \

RUN python3.9 -m pip install --ignore-installed PyYAML && rm -rf ~/.cache/pip

# We need to take the statically compiled library from the last known stable build and host it ourselves.
# Builds started failing on 26.06.2024, most likely because a new pybind11 release is pulled in
# automatically while building the zxing_cpp library, which makes cmake fail.
RUN wget https://storage.googleapis.com/roboflow-tests-assets/zxing_cpp_library_compiled_for_inference_v0.12.1.tar.gz \
&& tar -xvzf zxing_cpp_library_compiled_for_inference_v0.12.1.tar.gz \
&& mv zxing_cpp-2.2.0.dist-info /usr/local/lib/python3.9/dist-packages/zxing_cpp-2.2.0.dist-info \
&& mv zxingcpp.cpython-39-aarch64-linux-gnu.so /usr/local/lib/python3.9/dist-packages/ \
&& rm zxing_cpp_library_compiled_for_inference_v0.12.1.tar.gz

RUN python3.9 -m pip install --upgrade pip && python3.9 -m pip install \
git+https://github.com/pypdfium2-team/pypdfium2 \
-r _requirements.txt \
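A quick way to confirm the vendored binding is picked up (a sketch for manual verification, not part of the Dockerfile) is to run the following with `python3.9` inside the image, matching the `cpython-39-aarch64` build of the copied extension:

```python
# Sketch of a post-install check, assuming the prebuilt zxingcpp extension and its
# dist-info were copied into dist-packages as in the zxing_cpp step above.
import importlib.metadata

import zxingcpp  # the vendored pybind11 extension module

print(importlib.metadata.version("zxing_cpp"))  # expected: 2.2.0, from the copied dist-info
print(hasattr(zxingcpp, "read_barcodes"))       # the binding's main entry point
```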
2 changes: 1 addition & 1 deletion docs/quickstart/docker_configuration_options.md
@@ -31,7 +31,7 @@ Sets the default non-maximal suppression (NMS) behavior for detection type model

Variable: **ALLOW_ORIGINS**

Type: String (default = "")
Type: String (default = "*")

Sets the allow_origins property on the CORSMiddleware used with FastAPI for HTTP interfaces. Multiple values can be provided separated by a comma (ex. ALLOW_ORIGINS=orig1.com,orig2.com).

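For orientation, a minimal sketch of how a comma-separated ALLOW_ORIGINS value typically maps onto FastAPI's CORSMiddleware (illustrative only, not the inference server's actual wiring):

```python
# Illustrative sketch only: turns a comma-separated ALLOW_ORIGINS value into the
# allow_origins list expected by CORSMiddleware.
import os

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()

# "*" (the default) allows any origin; "orig1.com,orig2.com" becomes a two-element list.
allow_origins = os.getenv("ALLOW_ORIGINS", "*").split(",")

app.add_middleware(CORSMiddleware, allow_origins=allow_origins)
```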
2 changes: 1 addition & 1 deletion docs/quickstart/http_inference.md
@@ -145,7 +145,7 @@ There are two generations of routes in a Roboflow inference server To see what r

=== "Batch Inference"

Roboflow object detection models support batching. Utilize batch inference by passing a list of image objects in a request payload:
Object detection models support batching. Utilize batch inference by passing a list of image objects in a request payload:

```python
import requests
2 changes: 1 addition & 1 deletion docs/quickstart/run_model_on_image.md
@@ -161,7 +161,7 @@ There are two generations of routes in a Roboflow inference server To see what r

=== "Batch Inference"

Roboflow object detection models support batching. Utilize batch inference by passing a list of image objects in a request payload:
Object detection models support batching. Utilize batch inference by passing a list of image objects in a request payload:

```python
import requests
25 changes: 14 additions & 11 deletions docs/workflows/blocks.md
@@ -13,26 +13,29 @@ hide:
</div>
<div class="custom-grid">
<!--- AUTOGENERATED_BLOCKS_LIST -->
<p class="card block-card" data-url="condition" data-name="Condition" data-desc="Control the flow of a workflow based on the result of a step." data-labels="FLOW_CONTROL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="detections_consensus" data-name="DetectionsConsensus" data-desc="Combine predictions from multiple detections models to make a decision about object presence." data-labels="FUSION, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="clip_comparison" data-name="ClipComparison" data-desc="Compare CLIP image and text embeddings." data-labels="MODEL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="lmm" data-name="LMM" data-desc="Run a large language model." data-labels="MODEL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="lmm_for_classification" data-name="LMMForClassification" data-desc="Run a large language model for classification." data-labels="MODEL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="ocr_model" data-name="OCRModel" data-desc="Run Optical Character Recognition on an image." data-labels="MODEL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="yolo_world_model" data-name="YoloWorldModel" data-desc="Run a zero-shot object detection model." data-labels="MODEL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="roboflow_instance_segmentation_model" data-name="RoboflowInstanceSegmentationModel" data-desc="Run an instance segmentation model." data-labels="MODEL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="roboflow_keypoint_detection_model" data-name="RoboflowKeypointDetectionModel" data-desc="Run inference on a keypoint detection model." data-labels="MODEL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="roboflow_classification_model" data-name="RoboflowClassificationModel" data-desc="Run a classification model." data-labels="MODEL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="roboflow_multi_label_classification_model" data-name="RoboflowMultiLabelClassificationModel" data-desc="Run a multi-label classification model." data-labels="MODEL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="roboflow_object_detection_model" data-name="RoboflowObjectDetectionModel" data-desc="Detect objects using an object detection model." data-labels="MODEL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="roboflow_instance_segmentation_model" data-name="RoboflowInstanceSegmentationModel" data-desc="Predict the shape and size of objects." data-labels="MODEL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="roboflow_keypoint_detection_model" data-name="RoboflowKeypointDetectionModel" data-desc="Predict skeletons on objects." data-labels="MODEL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="roboflow_classification_model" data-name="RoboflowClassificationModel" data-desc="Apply a single tag to an image." data-labels="MODEL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="roboflow_multi_label_classification_model" data-name="RoboflowMultiLabelClassificationModel" data-desc="Apply multiple tags to an image." data-labels="MODEL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="roboflow_object_detection_model" data-name="RoboflowObjectDetectionModel" data-desc="Localize objects with bounding boxes." data-labels="MODEL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="barcode_detector" data-name="BarcodeDetector" data-desc="Detect the location of barcodes in an image." data-labels="MODEL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="qr_code_detector" data-name="QRCodeDetector" data-desc="Detect the location of QR codes in an image." data-labels="MODEL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="active_learning_data_collector" data-name="ActiveLearningDataCollector" data-desc="Collect data and predictions that flow through workflows for use in active learning." data-labels="SINK, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="absolute_static_crop" data-name="AbsoluteStaticCrop" data-desc="Use absolute coordinates for cropping." data-labels="TRANSFORMATION, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="crop" data-name="Crop" data-desc="Create dynamic crops from a detections model." data-labels="TRANSFORMATION, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="detection_filter" data-name="DetectionFilter" data-desc="Filter predictions from detection models based on defined conditions." data-labels="TRANSFORMATION, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="absolute_static_crop" data-name="AbsoluteStaticCrop" data-desc="Use absolute coordinates to crop." data-labels="TRANSFORMATION, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="dynamic_crop" data-name="DynamicCrop" data-desc="Use model predictions to dynamically crop." data-labels="TRANSFORMATION, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="detections_filter" data-name="DetectionsFilter" data-desc="Filters out unwanted Bounding Boxes based on conditions specified" data-labels="TRANSFORMATION, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="detection_offset" data-name="DetectionOffset" data-desc="Apply a fixed offset on the width and height of detections." data-labels="TRANSFORMATION, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="relative_static_crop" data-name="RelativeStaticCrop" data-desc="Use relative coordinates for cropping." data-labels="TRANSFORMATION, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="relative_static_crop" data-name="RelativeStaticCrop" data-desc="Use relative coordinates to crop." data-labels="TRANSFORMATION, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="detections_transformation" data-name="DetectionsTransformation" data-desc="Transforms detections manipulating detected Bounding Boxes" data-labels="TRANSFORMATION, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="roboflow_dataset_upload" data-name="RoboflowDatasetUpload" data-desc="Save images and predictions in your Roboflow Dataset" data-labels="SINK, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="continue_if" data-name="ContinueIf" data-desc="Stops execution of processing branch under certain condition" data-labels="FLOW_CONTROL, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="perspective_correction" data-name="PerspectiveCorrection" data-desc="Correct coordinates of detections from plane defined by given polygon to straight rectangular plane of given width and height" data-labels="TRANSFORMATION, APACHE-2.0" data-author=""></p>
<p class="card block-card" data-url="dynamic_zone" data-name="DynamicZone" data-desc="Simplify polygons so they are geometrically convex and simplify them to contain only requested amount of vertices" data-labels="TRANSFORMATION, APACHE-2.0" data-author=""></p>
<!--- AUTOGENERATED_BLOCKS_LIST -->
</div>
</div>
36 changes: 18 additions & 18 deletions docs/workflows/kinds.md
@@ -8,26 +8,26 @@ resolved we need a simple type system - that's what we call `kinds`.

## List of `workflows` kinds
<!--- AUTOGENERATED_KINDS_LIST -->
* [`*`](/workflows/kinds/*): Equivalent of any element
* [`Batch[parent_id]`](/workflows/kinds/batch_parent_id): Identifier of parent for step output
* [`float`](/workflows/kinds/float): Float value
* [`Batch[instance_segmentation_prediction]`](/workflows/kinds/batch_instance_segmentation_prediction): `'predictions'` key from Roboflow instance segmentation model output
* [`Batch[image]`](/workflows/kinds/batch_image): Image in workflows
* [`Batch[prediction_type]`](/workflows/kinds/batch_prediction_type): String value with type of prediction
* [`roboflow_project`](/workflows/kinds/roboflow_project): Roboflow project name
* [`dictionary`](/workflows/kinds/dictionary): Dictionary
* [`string`](/workflows/kinds/string): String value
* [`Batch[dictionary]`](/workflows/kinds/batch_dictionary): Batch of dictionaries
* [`Batch[object_detection_prediction]`](/workflows/kinds/batch_object_detection_prediction): `'predictions'` key from Roboflow object detection model output
* [`Batch[string]`](/workflows/kinds/batch_string): Batch of string values
* [`Batch[keypoint_detection_prediction]`](/workflows/kinds/batch_keypoint_detection_prediction): `'predictions'` key from Keypoint Detection Model output
* [`Batch[parent_id]`](/workflows/kinds/batch_parent_id): Identifier of parent for step output
* [`Batch[classification_prediction]`](/workflows/kinds/batch_classification_prediction): `'predictions'` key from Classification Model outputs
* [`roboflow_model_id`](/workflows/kinds/roboflow_model_id): Roboflow model id
* [`Batch[top_class]`](/workflows/kinds/batch_top_class): Batch of string values representing top class predicted by classification model
* [`Batch[bar_code_detection]`](/workflows/kinds/batch_bar_code_detection): Roboflow prediction with barcode detection
* [`Batch[keypoint_detection_prediction]`](/workflows/kinds/batch_keypoint_detection_prediction): `'predictions'` key from Roboflow keypoint detection model output
* [`boolean`](/workflows/kinds/boolean): Boolean flag
* [`Batch[instance_segmentation_prediction]`](/workflows/kinds/batch_instance_segmentation_prediction): `'predictions'` key from Instance Segmentation Model outputs
* [`*`](/workflows/kinds/*): Equivalent of any element
* [`integer`](/workflows/kinds/integer): Integer value
* [`Batch[image_metadata]`](/workflows/kinds/batch_image_metadata): Dictionary with image metadata required by supervision
* [`Batch[classification_prediction]`](/workflows/kinds/batch_classification_prediction): `'predictions'` key from Roboflow classifier output
* [`roboflow_project`](/workflows/kinds/roboflow_project): Roboflow project name
* [`roboflow_model_id`](/workflows/kinds/roboflow_model_id): Roboflow model id
* [`float_zero_to_one`](/workflows/kinds/float_zero_to_one): `float` value in range `[0.0, 1.0]`
* [`Batch[bar_code_detection]`](/workflows/kinds/batch_bar_code_detection): Prediction with barcode detection
* [`Batch[image]`](/workflows/kinds/batch_image): Image in workflows
* [`Batch[string]`](/workflows/kinds/batch_string): Batch of string values
* [`list_of_values`](/workflows/kinds/list_of_values): List of values of any types
* [`integer`](/workflows/kinds/integer): Integer value
* [`boolean`](/workflows/kinds/boolean): Boolean flag
* [`dictionary`](/workflows/kinds/dictionary): Dictionary
* [`string`](/workflows/kinds/string): String value
* [`Batch[boolean]`](/workflows/kinds/batch_boolean): Boolean values batch
* [`Batch[object_detection_prediction]`](/workflows/kinds/batch_object_detection_prediction): `'predictions'` key from Object Detection Model output
* [`float_zero_to_one`](/workflows/kinds/float_zero_to_one): `float` value in range `[0.0, 1.0]`
* [`Batch[prediction_type]`](/workflows/kinds/batch_prediction_type): String value with type of prediction
<!--- AUTOGENERATED_KINDS_LIST -->
2 changes: 1 addition & 1 deletion docs/workflows/understanding.md
@@ -83,7 +83,7 @@ class BlockManifest(WorkflowBlockManifest):
)
type: Literal["Crop"]
image: Union[InferenceImageSelector, OutputStepImageSelector] = Field(
description="Reference at image to be used as input for step processing",
description="The image to infer on",
examples=["$inputs.image", "$steps.cropping.crops"],
)
predictions: StepOutputSelector(