bioimageio.core

  1"""
  2.. include:: ../../README.md
  3"""
  4# ruff: noqa: E402
  5
  6__version__ = "0.9.2"
  7from loguru import logger
  8
  9logger.disable("bioimageio.core")
 10
 11from bioimageio.spec import (
 12    ValidationSummary,
 13    build_description,
 14    dump_description,
 15    load_dataset_description,
 16    load_description,
 17    load_description_and_validate_format_only,
 18    load_model_description,
 19    save_bioimageio_package,
 20    save_bioimageio_package_as_folder,
 21    save_bioimageio_yaml_only,
 22    validate_format,
 23)
 24
 25from . import (
 26    axis,
 27    block_meta,
 28    cli,
 29    commands,
 30    common,
 31    digest_spec,
 32    io,
 33    model_adapters,
 34    prediction,
 35    proc_ops,
 36    proc_setup,
 37    sample,
 38    stat_calculators,
 39    stat_measures,
 40    tensor,
 41)
 42from ._prediction_pipeline import PredictionPipeline, create_prediction_pipeline
 43from ._resource_tests import (
 44    enable_determinism,
 45    load_description_and_test,
 46    test_description,
 47    test_model,
 48)
 49from ._settings import settings
 50from .axis import Axis, AxisId
 51from .backends import create_model_adapter
 52from .block_meta import BlockMeta
 53from .common import MemberId
 54from .prediction import predict, predict_many
 55from .sample import Sample
 56from .stat_calculators import compute_dataset_measures
 57from .stat_measures import Stat
 58from .tensor import Tensor
 59from .weight_converters import add_weights
 60
 61# aliases
 62test_resource = test_description
 63"""alias of `test_description`"""
 64load_resource = load_description
 65"""alias of `load_description`"""
 66load_model = load_model_description
 67"""alias of `load_model_description`"""
 68
 69__all__ = [
 70    "__version__",
 71    "add_weights",
 72    "axis",
 73    "Axis",
 74    "AxisId",
 75    "block_meta",
 76    "BlockMeta",
 77    "build_description",
 78    "cli",
 79    "commands",
 80    "common",
 81    "compute_dataset_measures",
 82    "create_model_adapter",
 83    "create_prediction_pipeline",
 84    "digest_spec",
 85    "dump_description",
 86    "enable_determinism",
 87    "io",
 88    "load_dataset_description",
 89    "load_description_and_test",
 90    "load_description_and_validate_format_only",
 91    "load_description",
 92    "load_model_description",
 93    "load_model",
 94    "load_resource",
 95    "MemberId",
 96    "model_adapters",
 97    "predict_many",
 98    "predict",
 99    "prediction",
100    "PredictionPipeline",
101    "proc_ops",
102    "proc_setup",
103    "sample",
104    "Sample",
105    "save_bioimageio_package_as_folder",
106    "save_bioimageio_package",
107    "save_bioimageio_yaml_only",
108    "settings",
109    "stat_calculators",
110    "stat_measures",
111    "Stat",
112    "tensor",
113    "Tensor",
114    "test_description",
115    "test_model",
116    "test_resource",
117    "validate_format",
118    "ValidationSummary",
119]
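A minimal usage sketch of the top-level API; the model source below is a placeholder and `test_model` returns a `ValidationSummary`:

```python
from bioimageio.core import load_model_description, test_model

model = load_model_description("path/to/model/rdf.yaml")  # placeholder source
summary = test_model(model)  # returns a ValidationSummary
print(summary.status)
```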
__version__ = '0.9.2'
def add_weights(model_descr: ModelDescr, *, output_path: DirectoryPath, source_format: Optional[WeightsFormat] = None, target_format: Optional[WeightsFormat] = None, verbose: bool = False, allow_tracing: bool = True) -> Union[ModelDescr, InvalidDescr]:

(`WeightsFormat`: 'keras_hdf5' | 'onnx' | 'pytorch_state_dict' | 'tensorflow_js' | 'tensorflow_saved_model_bundle' | 'torchscript')
def add_weights(
    model_descr: ModelDescr,
    *,
    output_path: DirectoryPath,
    source_format: Optional[WeightsFormat] = None,
    target_format: Optional[WeightsFormat] = None,
    verbose: bool = False,
    allow_tracing: bool = True,
) -> Union[ModelDescr, InvalidDescr]:
    """Convert model weights to other formats and add them to the model description.

    Args:
        output_path: Path to save the updated model package to.
        source_format: Convert from a specific weights format.
                       Default: choose automatically from any available.
        target_format: Convert to a specific weights format.
                       Default: attempt to convert to any missing format.
        verbose: Log more output, including full tracebacks of failed conversions.
        allow_tracing: Allow converting a PyTorch model to torchscript by tracing
                       if conversion by scripting fails.

    Returns:
        A (potentially invalid) model copy stored at `output_path` with added weights
        if any conversion was possible.

    """
    if not isinstance(model_descr, ModelDescr):
        if model_descr.type == "model" and not isinstance(model_descr, InvalidDescr):
            raise TypeError(
                f"Model format {model_descr.format} is not supported, please update"
                + f" model to format {ModelDescr.implemented_format_version} first."
            )

        raise TypeError(type(model_descr))

    # save model to local folder
    output_path = save_bioimageio_package_as_folder(
        model_descr, output_path=output_path
    )
    # reload from local folder to make sure we do not edit the given model
    model_descr = load_model_description(
        output_path, perform_io_checks=False, format_version="latest"
    )

    if source_format is None:
        available = set(model_descr.weights.available_formats)
    else:
        available = {source_format}

    if target_format is None:
        missing = set(model_descr.weights.missing_formats)
    else:
        missing = {target_format}

    originally_missing = set(missing)

    if "pytorch_state_dict" in available and "torchscript" in missing:
        logger.info(
            "Attempting to convert 'pytorch_state_dict' weights to 'torchscript'."
        )
        from .pytorch_to_torchscript import convert

        try:
            torchscript_weights_path = output_path / "weights_torchscript.pt"
            model_descr.weights.torchscript = convert(
                model_descr,
                output_path=torchscript_weights_path,
                use_tracing=False,
            )
        except Exception as e:
            if verbose:
                traceback.print_exception(type(e), e, e.__traceback__)

            logger.error(e)
        else:
            available.add("torchscript")
            missing.discard("torchscript")

    if allow_tracing and "pytorch_state_dict" in available and "torchscript" in missing:
        logger.info(
            "Attempting to convert 'pytorch_state_dict' weights to 'torchscript' by tracing."
        )
        from .pytorch_to_torchscript import convert

        try:
            torchscript_weights_path = output_path / "weights_torchscript_traced.pt"

            model_descr.weights.torchscript = convert(
                model_descr,
                output_path=torchscript_weights_path,
                use_tracing=True,
            )
        except Exception as e:
            if verbose:
                traceback.print_exception(type(e), e, e.__traceback__)

            logger.error(e)
        else:
            available.add("torchscript")
            missing.discard("torchscript")

    if "torchscript" in available and "onnx" in missing:
        logger.info("Attempting to convert 'torchscript' weights to 'onnx'.")
        from .torchscript_to_onnx import convert

        try:
            onnx_weights_path = output_path / "weights.onnx"
            model_descr.weights.onnx = convert(
                model_descr,
                output_path=onnx_weights_path,
            )
        except Exception as e:
            if verbose:
                traceback.print_exception(type(e), e, e.__traceback__)

            logger.error(e)
        else:
            available.add("onnx")
            missing.discard("onnx")

    if "pytorch_state_dict" in available and "onnx" in missing:
        logger.info("Attempting to convert 'pytorch_state_dict' weights to 'onnx'.")
        from .pytorch_to_onnx import convert

        try:
            onnx_weights_path = output_path / "weights.onnx"

            model_descr.weights.onnx = convert(
                model_descr,
                output_path=onnx_weights_path,
                verbose=verbose,
            )
        except Exception as e:
            if verbose:
                traceback.print_exception(type(e), e, e.__traceback__)

            logger.error(e)
        else:
            available.add("onnx")
            missing.discard("onnx")

    if missing:
        logger.warning(
            f"Converting from any of the available weights formats {available} to any"
            + f" of {missing} failed or is not yet implemented. Please create an issue"
            + " at https://github.com/bioimage-io/core-bioimage-io-python/issues/new/choose"
            + " if you would like bioimageio.core to support a particular conversion."
        )

    if originally_missing == missing:
        logger.warning("failed to add any converted weights")
        return model_descr
    else:
        logger.info("added weights formats {}", originally_missing - missing)
        # resave model with updated rdf.yaml
        _ = save_bioimageio_package_as_folder(model_descr, output_path=output_path)
        tested_model_descr = load_description_and_test(
            model_descr, format_version="latest", expected_type="model"
        )
        if not isinstance(tested_model_descr, ModelDescr):
            logger.error(
                f"The updated model description at {output_path} did not pass testing."
            )

        return tested_model_descr

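A short usage sketch (paths are placeholders; `target_format` may be omitted to attempt all missing conversions):

```python
from pathlib import Path

from bioimageio.core import add_weights, load_model_description

model = load_model_description("path/to/model")  # placeholder source
updated = add_weights(
    model,
    output_path=Path("model_with_added_weights"),  # package is saved here as a folder
    target_format="onnx",  # omit to attempt every missing format
    verbose=True,  # print full tracebacks of failed conversions
)
```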

@dataclass
class Axis:
@dataclass
class Axis:
    id: AxisId
    type: Literal["batch", "channel", "index", "space", "time"]

    def __post_init__(self):
        if self.type == "batch":
            self.id = AxisId("batch")
        elif self.type == "channel":
            self.id = AxisId("channel")

    @classmethod
    def create(cls, axis: AxisLike) -> Axis:
        if isinstance(axis, cls):
            return axis
        elif isinstance(axis, Axis):
            return Axis(id=axis.id, type=axis.type)
        elif isinstance(axis, v0_5.AxisBase):
            return Axis(id=AxisId(axis.id), type=axis.type)
        elif isinstance(axis, str):
            return Axis(id=AxisId(axis), type=_guess_axis_type(axis))
        else:
            assert_never(axis)
Axis(id: AxisId, type: Literal['batch', 'channel', 'index', 'space', 'time'])
id: AxisId
type: Literal['batch', 'channel', 'index', 'space', 'time']
@classmethod
def create(cls, axis: AxisLike) -> Axis:

(`AxisLike` covers `AxisId`, the single-letter axis names 'b' | 'i' | 't' | 'c' | 'z' | 'y' | 'x', the input/output axis descriptions of `bioimageio.spec.model.v0_5`, and `Axis` itself.)
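For illustration, a sketch of how `Axis.create` normalizes axis descriptions, assuming the conventional single-letter mapping implemented by `_guess_axis_type` ('z'/'y'/'x' are spatial, 'b' is batch):

```python
from bioimageio.core import Axis, AxisId

assert Axis.create("x") == Axis(id=AxisId("x"), type="space")
# for batch axes, __post_init__ forces the id to "batch"
assert Axis.create("b") == Axis(id=AxisId("batch"), type="batch")
```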
class AxisId(bioimageio.spec._internal.types.LowerCaseIdentifier):
class AxisId(LowerCaseIdentifier):
    root_model: ClassVar[Type[RootModel[Any]]] = RootModel[
        Annotated[
            LowerCaseIdentifierAnno,
            MaxLen(16),
            AfterValidator(_normalize_axis_id),
        ]
    ]


root_model: ClassVar[Type[RootModel[Any]]] = RootModel[Annotated[LowerCaseIdentifierAnno, MaxLen(16), AfterValidator(_normalize_axis_id)]]

the pydantic root model to validate the string
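A brief sketch of the constraint this type enforces:

```python
from bioimageio.core import AxisId

axis_id = AxisId("channel")  # a lowercase identifier of at most 16 characters
```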

@dataclass(frozen=True)
class BlockMeta:
@dataclass(frozen=True)
class BlockMeta:
    """Block meta data of a sample member (a tensor in a sample)

    Figure for illustration:
    The first 2d block (dashed) of a sample member (**bold**).
    The inner slice (thin) is expanded by a halo in both dimensions on both sides.
    The outer slice reaches from the sample member origin (0, 0) to the right halo point.

    ```terminal
    ┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─  ─ ─ ─ ─ ─ ─ ─ ┐
    ╷ halo(left)                         ╷
    ╷                                    ╷
    ╷  (0, 0)┏━━━━━━━━━━━━━━━━━┯━━━━━━━━━┯━━━➔
    ╷        ┃                 │         ╷  sample member
    ╷        ┃      inner      │         ╷
    ╷        ┃   (and outer)   │  outer  ╷
    ╷        ┃      slice      │  slice  ╷
    ╷        ┃                 │         ╷
    ╷        ┣─────────────────┘         ╷
    ╷        ┃   outer slice             ╷
    ╷        ┃               halo(right) ╷
    └ ─ ─ ─ ─┃─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─┘

    ```

    note:
    - Inner and outer slices are specified in sample member coordinates.
    - The outer_slice of a block at the sample edge may overlap by more than the
        halo with the neighboring block (the inner slices will not overlap though).

    """

    sample_shape: PerAxis[int]
    """the axis sizes of the whole (unblocked) sample"""

    inner_slice: PerAxis[SliceInfo]
    """inner region (without halo) wrt the sample"""

    halo: PerAxis[Halo]
    """halo enlarging the inner region to the block's sizes"""

    block_index: BlockIndex
    """the i-th block of the sample"""

    blocks_in_sample: TotalNumberOfBlocks
    """total number of blocks in the sample"""

    @cached_property
    def shape(self) -> PerAxis[int]:
        """axis lengths of the block"""
        return Frozen(
            {
                a: s.stop - s.start + (sum(self.halo[a]) if a in self.halo else 0)
                for a, s in self.inner_slice.items()
            }
        )

    @cached_property
    def padding(self) -> PerAxis[PadWidth]:
        """padding to realize the halo at the sample edge
        where we cannot simply enlarge the inner slice"""
        return Frozen(
            {
                a: PadWidth(
                    (
                        self.halo[a].left
                        - (self.inner_slice[a].start - self.outer_slice[a].start)
                        if a in self.halo
                        else 0
                    ),
                    (
                        self.halo[a].right
                        - (self.outer_slice[a].stop - self.inner_slice[a].stop)
                        if a in self.halo
                        else 0
                    ),
                )
                for a in self.inner_slice
            }
        )

    @cached_property
    def outer_slice(self) -> PerAxis[SliceInfo]:
        """slice of the outer block (without padding) wrt the sample"""
        return Frozen(
            {
                a: SliceInfo(
                    max(
                        0,
                        min(
                            self.inner_slice[a].start
                            - (self.halo[a].left if a in self.halo else 0),
                            self.sample_shape[a]
                            - self.inner_shape[a]
                            - (self.halo[a].left if a in self.halo else 0),
                        ),
                    ),
                    min(
                        self.sample_shape[a],
                        self.inner_slice[a].stop
                        + (self.halo[a].right if a in self.halo else 0),
                    ),
                )
                for a in self.inner_slice
            }
        )

    @cached_property
    def inner_shape(self) -> PerAxis[int]:
        """axis lengths of the inner region (without halo)"""
        return Frozen({a: s.stop - s.start for a, s in self.inner_slice.items()})

    @cached_property
    def local_slice(self) -> PerAxis[SliceInfo]:
        """inner slice wrt the block, **not** the sample"""
        return Frozen(
            {
                a: SliceInfo(
                    self.halo[a].left,
                    self.halo[a].left + self.inner_shape[a],
                )
                for a in self.inner_slice
            }
        )

    @property
    def dims(self) -> Collection[AxisId]:
        return set(self.inner_shape)

    @property
    def tagged_shape(self) -> PerAxis[int]:
        """alias for shape"""
        return self.shape

    @property
    def inner_slice_wo_overlap(self):
        """subslice of the inner slice, such that all `inner_slice_wo_overlap` can be
        stitched together trivially to form the original sample.

        This can also be used to calculate statistics
        without overrepresenting block edge regions."""
        # TODO: update inner_slice_wo_overlap when adding block overlap
        return self.inner_slice

    def __post_init__(self):
        # freeze mutable inputs
        if not isinstance(self.sample_shape, Frozen):
            object.__setattr__(self, "sample_shape", Frozen(self.sample_shape))

        if not isinstance(self.inner_slice, Frozen):
            object.__setattr__(self, "inner_slice", Frozen(self.inner_slice))

        if not isinstance(self.halo, Frozen):
            object.__setattr__(self, "halo", Frozen(self.halo))

        assert all(
            a in self.sample_shape for a in self.inner_slice
        ), "block has axes not present in sample"

        assert all(
            a in self.inner_slice for a in self.halo
        ), "halo has axes not present in block"

        if any(s > self.sample_shape[a] for a, s in self.shape.items()):
            logger.warning(
                "block {} larger than sample {}", self.shape, self.sample_shape
            )

    def get_transformed(
        self, new_axes: PerAxis[Union[LinearAxisTransform, int]]
    ) -> Self:
        return self.__class__(
            sample_shape={
                a: (
                    trf
                    if isinstance(trf, int)
                    else trf.compute(self.sample_shape[trf.axis])
                )
                for a, trf in new_axes.items()
            },
            inner_slice={
                a: (
                    SliceInfo(0, trf)
                    if isinstance(trf, int)
                    else SliceInfo(
                        trf.compute(self.inner_slice[trf.axis].start),
                        trf.compute(self.inner_slice[trf.axis].stop),
                    )
                )
                for a, trf in new_axes.items()
            },
            halo={
                a: (
                    Halo(0, 0)
                    if isinstance(trf, int)
                    else Halo(self.halo[trf.axis].left, self.halo[trf.axis].right)
                )
                for a, trf in new_axes.items()
            },
            block_index=self.block_index,
            blocks_in_sample=self.blocks_in_sample,
        )

BlockMeta(sample_shape: Mapping[AxisId, int], inner_slice: Mapping[AxisId, bioimageio.core.common.SliceInfo], halo: Mapping[AxisId, bioimageio.core.common.Halo], block_index: int, blocks_in_sample: int)
sample_shape: Mapping[AxisId, int]

the axis sizes of the whole (unblocked) sample

inner_slice: Mapping[AxisId, bioimageio.core.common.SliceInfo]

inner region (without halo) wrt the sample

halo: Mapping[AxisId, bioimageio.core.common.Halo]

halo enlarging the inner region to the block's sizes

block_index: int

the i-th block of the sample

blocks_in_sample: int

total number of blocks in the sample

shape: Mapping[AxisId, int]

axis lengths of the block

padding: Mapping[AxisId, bioimageio.core.common.PadWidth]

padding to realize the halo at the sample edge where we cannot simply enlarge the inner slice

outer_slice: Mapping[AxisId, bioimageio.core.common.SliceInfo]

slice of the outer block (without padding) wrt the sample

inner_shape: Mapping[AxisId, int]

axis lengths of the inner region (without halo)

local_slice: Mapping[AxisId, bioimageio.core.common.SliceInfo]

inner slice wrt the block, **not** the sample

dims: Collection[AxisId]

tagged_shape: Mapping[AxisId, int]

alias for shape

inner_slice_wo_overlap

subslice of the inner slice, such that all `inner_slice_wo_overlap` can be stitched together trivially to form the original sample. This can also be used to calculate statistics without overrepresenting block edge regions.

def get_transformed(self, new_axes: Mapping[AxisId, Union[LinearAxisTransform, int]]) -> Self:
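A sketch constructing the first block of a 2d sample, assuming `Halo` and `SliceInfo` from `bioimageio.core.common`:

```python
from bioimageio.core import AxisId, BlockMeta
from bioimageio.core.common import Halo, SliceInfo

x, y = AxisId("x"), AxisId("y")
block = BlockMeta(
    sample_shape={x: 128, y: 128},
    inner_slice={x: SliceInfo(0, 64), y: SliceInfo(0, 64)},
    halo={x: Halo(8, 8), y: Halo(8, 8)},
    block_index=0,
    blocks_in_sample=4,
)
print(block.shape)        # inner shape enlarged by the halo: 80 per axis
print(block.padding)      # PadWidth(8, 0) per axis: the left halo must be padded at the sample edge
print(block.outer_slice)  # SliceInfo(0, 72) per axis: the sample region covered by the block
```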
def build_description(content: BioimageioYamlContentView, /, *, context: Optional[ValidationContext] = None, format_version: Union[Literal['latest', 'discover'], str] = 'discover') -> Union[ResourceDescr, InvalidDescr]:
def build_description(
    content: BioimageioYamlContentView,
    /,
    *,
    context: Optional[ValidationContext] = None,
    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
) -> Union[ResourceDescr, InvalidDescr]:
    """build a bioimage.io resource description from an RDF's content.

    Use `load_description` if you want to build a resource description from an rdf.yaml
    or bioimage.io zip-package.

    Args:
        content: loaded rdf.yaml file (loaded with YAML, not bioimageio.spec)
        context: validation context to use during validation
        format_version: (optional) use this argument to load the resource and
                        convert its metadata to a higher format_version

    Returns:
        An object holding all metadata of the bioimage.io resource

    """

    return build_description_impl(
        content,
        context=context,
        format_version=format_version,
        get_rd_class=_get_rd_class,
    )

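A sketch of building a description from YAML content loaded with a plain YAML parser (PyYAML is an assumption; any loader yielding plain dicts works):

```python
import yaml  # assumption: PyYAML is available

from bioimageio.core import build_description

with open("rdf.yaml", encoding="utf-8") as f:
    content = yaml.safe_load(f)  # plain YAML load, as the docstring asks for

descr = build_description(content)  # a ResourceDescr, or InvalidDescr on failure
```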

def compute_dataset_measures(measures: Iterable[DatasetMeasure], dataset: Iterable[Sample]) -> Dict[DatasetMeasure, MeasureValue]:
def compute_dataset_measures(
    measures: Iterable[DatasetMeasure], dataset: Iterable[Sample]
) -> Dict[DatasetMeasure, MeasureValue]:
    """compute all dataset `measures` for the given `dataset`"""
    sample_calculators, calculators = get_measure_calculators(measures)
    assert not sample_calculators

    ret: Dict[DatasetMeasure, MeasureValue] = {}

    for sample in dataset:
        for calc in calculators:
            calc.update(sample)

    for calc in calculators:
        ret.update(calc.finalize().items())

    return ret

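A hedged sketch computing a dataset-wide mean for one tensor member; the `Tensor` and `DatasetMean` constructor calls are assumptions based on their documented fields:

```python
import numpy as np

from bioimageio.core import MemberId, Sample, Tensor, compute_dataset_measures
from bioimageio.core.stat_measures import DatasetMean

member = MemberId("raw")
samples = [
    Sample(
        # assumed Tensor constructor: numpy array plus axis-like `dims`
        members={member: Tensor(np.random.rand(1, 32, 32), dims=("c", "y", "x"))},
        stat={},
        id=i,
    )
    for i in range(3)
]
stats = compute_dataset_measures([DatasetMean(member_id=member)], samples)
for measure, value in stats.items():
    print(measure, value)
```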

@final
@classmethod
def create_model_adapter(model_description: Union[v0_4.ModelDescr, v0_5.ModelDescr], *, devices: Optional[Sequence[str]] = None, weight_format_priority_order: Optional[Sequence[SupportedWeightsFormat]] = None):

(`SupportedWeightsFormat`: 'keras_hdf5' | 'onnx' | 'pytorch_state_dict' | 'tensorflow_saved_model_bundle' | 'torchscript')
    @final
    @classmethod
    def create(
        cls,
        model_description: Union[v0_4.ModelDescr, v0_5.ModelDescr],
        *,
        devices: Optional[Sequence[str]] = None,
        weight_format_priority_order: Optional[Sequence[SupportedWeightsFormat]] = None,
    ):
        """
        Creates a model adapter based on the passed model description.
        Note: All framework-specific adapters should be created inside this function
        to prevent different framework initializations from interfering with each
        other.
        """
        if not isinstance(model_description, (v0_4.ModelDescr, v0_5.ModelDescr)):
            raise TypeError(
                f"expected v0_4.ModelDescr or v0_5.ModelDescr, but got {type(model_description)}"
            )

        weights = model_description.weights
        errors: List[Exception] = []
        weight_format_priority_order = (
            DEFAULT_WEIGHT_FORMAT_PRIORITY_ORDER
            if weight_format_priority_order is None
            else weight_format_priority_order
        )
        # limit weight formats to the ones present
        weight_format_priority_order_present: Sequence[SupportedWeightsFormat] = [
            w for w in weight_format_priority_order if getattr(weights, w) is not None
        ]
        if not weight_format_priority_order_present:
            raise ValueError(
                f"None of the specified weight formats ({weight_format_priority_order}) is present ({weight_format_priority_order_present})"
            )

        for wf in weight_format_priority_order_present:
            if wf == "pytorch_state_dict":
                assert weights.pytorch_state_dict is not None
                try:
                    from .pytorch_backend import PytorchModelAdapter

                    return PytorchModelAdapter(
                        model_description=model_description, devices=devices
                    )
                except Exception as e:
                    errors.append(e)
            elif wf == "tensorflow_saved_model_bundle":
                assert weights.tensorflow_saved_model_bundle is not None
                try:
                    from .tensorflow_backend import create_tf_model_adapter

                    return create_tf_model_adapter(
                        model_description=model_description, devices=devices
                    )
                except Exception as e:
                    errors.append(e)
            elif wf == "onnx":
                assert weights.onnx is not None
                try:
                    from .onnx_backend import ONNXModelAdapter

                    return ONNXModelAdapter(
                        model_description=model_description, devices=devices
                    )
                except Exception as e:
                    errors.append(e)
            elif wf == "torchscript":
                assert weights.torchscript is not None
                try:
                    from .torchscript_backend import TorchscriptModelAdapter

                    return TorchscriptModelAdapter(
                        model_description=model_description, devices=devices
                    )
                except Exception as e:
                    errors.append(e)
            elif wf == "keras_hdf5":
                assert weights.keras_hdf5 is not None
                # keras can either be installed as a separate package or used as part of tensorflow
                # we try to first import the keras model adapter using the separate package and,
                # if it is not available, try to load the one using tf
                try:
                    try:
                        from .keras_backend import KerasModelAdapter
                    except Exception:
                        from .tensorflow_backend import KerasModelAdapter

                    return KerasModelAdapter(
                        model_description=model_description, devices=devices
                    )
                except Exception as e:
                    errors.append(e)
            else:
                assert_never(wf)

        assert errors
        if len(weight_format_priority_order) == 1:
            assert len(errors) == 1
            raise errors[0]

        else:
            msg = (
                "None of the weight format specific model adapters could be created"
                + " in this environment."
            )
            raise ExceptionGroup(msg, errors)

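A sketch with an explicit weight format preference (the model source is a placeholder):

```python
from bioimageio.core import create_model_adapter, load_model_description

model = load_model_description("path/to/model")  # placeholder source
adapter = create_model_adapter(
    model,
    weight_format_priority_order=("onnx", "pytorch_state_dict"),
)
```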

def create_prediction_pipeline(bioimageio_model: AnyModelDescr, *, devices: Optional[Sequence[str]] = None, weight_format: Optional[SupportedWeightsFormat] = None, weights_format: Optional[SupportedWeightsFormat] = None, dataset_for_initial_statistics: Iterable[Union[Sample, Sequence[Tensor]]] = (), keep_updating_initial_dataset_statistics: bool = False, fixed_dataset_statistics: Mapping[DatasetMeasure, MeasureValue] = MappingProxyType({}), model_adapter: Optional[ModelAdapter] = None, ns: Optional[BlocksizeParameter] = None, default_blocksize_parameter: BlocksizeParameter = 10, **deprecated_kwargs: Any) -> PredictionPipeline:
def create_prediction_pipeline(
    bioimageio_model: AnyModelDescr,
    *,
    devices: Optional[Sequence[str]] = None,
    weight_format: Optional[SupportedWeightsFormat] = None,
    weights_format: Optional[SupportedWeightsFormat] = None,
    dataset_for_initial_statistics: Iterable[Union[Sample, Sequence[Tensor]]] = tuple(),
    keep_updating_initial_dataset_statistics: bool = False,
    fixed_dataset_statistics: Mapping[DatasetMeasure, MeasureValue] = MappingProxyType(
        {}
    ),
    model_adapter: Optional[ModelAdapter] = None,
    ns: Optional[BlocksizeParameter] = None,
    default_blocksize_parameter: BlocksizeParameter = 10,
    **deprecated_kwargs: Any,
) -> PredictionPipeline:
    """
    Creates a prediction pipeline which includes:
    * computation of input statistics
    * preprocessing
    * model prediction
    * computation of output statistics
    * postprocessing

    Args:
        bioimageio_model: A bioimageio model description.
        devices: (optional) Devices to use for inference.
        weight_format: deprecated in favor of **weights_format**
        weights_format: (optional) Use a specific **weights_format** rather than
            choosing one automatically.
            A corresponding `bioimageio.core.model_adapters.ModelAdapter` will be
            created to run inference with the **bioimageio_model**.
        dataset_for_initial_statistics: (optional) If preprocessing steps require input
            dataset statistics, **dataset_for_initial_statistics** allows you to
            specify a dataset from which these statistics are computed.
        keep_updating_initial_dataset_statistics: (optional) Set to `True` if you want
            to update dataset statistics with each processed sample.
        fixed_dataset_statistics: (optional) Allows you to specify a mapping of
            `DatasetMeasure`s to precomputed `MeasureValue`s.
        model_adapter: (optional) Allows you to use a custom **model_adapter** instead
            of creating one according to the present/selected **weights_format**.
        ns: deprecated in favor of **default_blocksize_parameter**
        default_blocksize_parameter: Allows to control the default block size for
            blockwise predictions, see `BlocksizeParameter`.

    """
    weights_format = weight_format or weights_format
    del weight_format
    default_blocksize_parameter = ns or default_blocksize_parameter
    del ns
    if deprecated_kwargs:
        warnings.warn(
            f"deprecated create_prediction_pipeline kwargs: {set(deprecated_kwargs)}"
        )

    model_adapter = model_adapter or create_model_adapter(
        model_description=bioimageio_model,
        devices=devices,
        weight_format_priority_order=weights_format and (weights_format,),
    )

    input_ids = get_member_ids(bioimageio_model.inputs)

    def dataset():
        common_stat: Stat = {}
        for i, x in enumerate(dataset_for_initial_statistics):
            if isinstance(x, Sample):
                yield x
            else:
                yield Sample(members=dict(zip(input_ids, x)), stat=common_stat, id=i)

    preprocessing, postprocessing = setup_pre_and_postprocessing(
        bioimageio_model,
        dataset(),
        keep_updating_initial_dataset_stats=keep_updating_initial_dataset_statistics,
        fixed_dataset_stats=fixed_dataset_statistics,
    )

    return PredictionPipeline(
        name=bioimageio_model.name,
        model_description=bioimageio_model,
        model_adapter=model_adapter,
        preprocessing=preprocessing,
        postprocessing=postprocessing,
        default_blocksize_parameter=default_blocksize_parameter,
    )

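A sketch of creating a pipeline pinned to one weights format; for a higher-level interface see `bioimageio.core.predict` and `bioimageio.core.predict_many`:

```python
from bioimageio.core import create_prediction_pipeline, load_model_description

model = load_model_description("path/to/model")  # placeholder source
pipeline = create_prediction_pipeline(
    model,
    weights_format="onnx",  # optional: pick a specific weights format
    default_blocksize_parameter=10,
)
```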
def dump_description(rd: Union[ResourceDescr, InvalidDescr], /, *, exclude_unset: bool = True, exclude_defaults: bool = False) -> BioimageioYamlContent:
def dump_description(
    rd: Union[ResourceDescr, InvalidDescr],
    /,
    *,
    exclude_unset: bool = True,
    exclude_defaults: bool = False,
) -> BioimageioYamlContent:
    """Converts a resource description to a dictionary containing only simple types
    that can directly be serialized to YAML.

    Args:
        rd: bioimageio resource description
        exclude_unset: Exclude fields that have not explicitly been set.
        exclude_defaults: Exclude fields that have the default value (even if set explicitly).
    """
    return rd.model_dump(
        mode="json", exclude_unset=exclude_unset, exclude_defaults=exclude_defaults
    )

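A sketch of the round trip from a loaded description back to YAML-ready content (PyYAML used for serialization as an assumption):

```python
import yaml  # assumption: PyYAML is available

from bioimageio.core import dump_description, load_description

descr = load_description("path/to/rdf.yaml")  # placeholder source
content = dump_description(descr)  # dict of YAML-serializable values
print(yaml.safe_dump(content))
```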
def enable_determinism(mode: Literal['seed_only', 'full'] = 'full', weight_formats: Optional[Sequence[SupportedWeightsFormat]] = None):
def enable_determinism(
    mode: Literal["seed_only", "full"] = "full",
    weight_formats: Optional[Sequence[SupportedWeightsFormat]] = None,
):
    """Seed and configure ML frameworks for maximum reproducibility.
    May degrade performance. Only recommended for testing reproducibility!

    Seed any random generators and (if **mode**=="full") request ML frameworks to use
    deterministic algorithms.

    Args:
        mode: determinism mode
            - 'seed_only' -- only set seeds, or
            - 'full' -- additionally enable deterministic algorithms
              (might degrade performance or throw exceptions)
        weight_formats: Limit which deep learning frameworks are imported
            based on **weight_formats**.
            E.g. this allows to avoid importing tensorflow when testing with pytorch.

    Notes:
        - **mode** == "full" might degrade performance or throw exceptions.
        - Subsequent inference calls might still differ. Call before each function
          (sequence) that is expected to be reproducible.
        - Degraded performance: Use for testing reproducibility only!
        - Recipes:
            - [PyTorch](https://pytorch.org/docs/stable/notes/randomness.html)
            - [Keras](https://keras.io/examples/keras_recipes/reproducibility_recipes/)
            - [NumPy](https://numpy.org/doc/2.0/reference/random/generated/numpy.random.seed.html)
    """
    try:
        try:
            import numpy.random
        except ImportError:
            pass
        else:
            numpy.random.seed(0)
    except Exception as e:
        logger.debug(str(e))

    if (
        weight_formats is None
        or "pytorch_state_dict" in weight_formats
        or "torchscript" in weight_formats
    ):
        try:
            try:
                import torch
            except ImportError:
                pass
            else:
                _ = torch.manual_seed(0)
                torch.use_deterministic_algorithms(mode == "full")
        except Exception as e:
            logger.debug(str(e))

    if (
        weight_formats is None
        or "tensorflow_saved_model_bundle" in weight_formats
        or "keras_hdf5" in weight_formats
    ):
        try:
            os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
            try:
                import tensorflow as tf  # pyright: ignore[reportMissingTypeStubs]
            except ImportError:
                pass
            else:
                tf.random.set_seed(0)
                if mode == "full":
                    tf.config.experimental.enable_op_determinism()
                # TODO: find possibility to switch it off again??
        except Exception as e:
            logger.debug(str(e))

    if weight_formats is None or "keras_hdf5" in weight_formats:
        try:
            try:
                import keras  # pyright: ignore[reportMissingTypeStubs]
            except ImportError:
                pass
            else:
                keras.utils.set_random_seed(0)
        except Exception as e:
            logger.debug(str(e))

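A sketch seeding only the frameworks relevant for the weights under test:

```python
from bioimageio.core import enable_determinism, test_model

# call before each run that is expected to be reproducible;
# limiting weight_formats avoids importing unrelated frameworks
enable_determinism(mode="seed_only", weight_formats=("pytorch_state_dict",))
summary = test_model("path/to/model")  # placeholder source
```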
def load_dataset_description(source: Union[PermissiveFileSource, ZipFile], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', perform_io_checks: Optional[bool] = None, known_files: Optional[Dict[str, Optional[Sha256]]] = None, sha256: Optional[Sha256] = None) -> AnyDatasetDescr:

(`PermissiveFileSource` covers local file paths, relative file paths, URLs, and plain strings.)
180def load_dataset_description(
181    source: Union[PermissiveFileSource, ZipFile],
182    /,
183    *,
184    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
185    perform_io_checks: Optional[bool] = None,
186    known_files: Optional[Dict[str, Optional[Sha256]]] = None,
187    sha256: Optional[Sha256] = None,
188) -> AnyDatasetDescr:
189    """same as `load_description`, but additionally ensures that the loaded
190    description is valid and of type 'dataset'.
191    """
192    rd = load_description(
193        source,
194        format_version=format_version,
195        perform_io_checks=perform_io_checks,
196        known_files=known_files,
197        sha256=sha256,
198    )
199    return ensure_description_is_dataset(rd)

same as load_description, but additionally ensures that the loaded description is valid and of type 'dataset'.

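A minimal usage sketch, assuming a dataset RDF at a hypothetical local path:

    from bioimageio.core import load_dataset_description

    ds = load_dataset_description("path/to/dataset/rdf.yaml")  # hypothetical path
    print(ds.type, ds.name)  # guaranteed to be a valid 'dataset' description
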
def load_description_and_test( source: Union[Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[bioimageio.spec.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[bioimageio.spec.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[bioimageio.spec.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[bioimageio.spec.NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[bioimageio.spec.NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[bioimageio.spec.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')], Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, pydantic.networks.HttpUrl, Dict[str, YamlValue]], *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', weight_format: Optional[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_saved_model_bundle', 'torchscript']] = None, devices: Optional[Sequence[str]] = None, determinism: Literal['seed_only', 'full'] = 'seed_only', expected_type: Optional[str] = None, sha256: Optional[bioimageio.spec._internal.io_basics.Sha256] = None, stop_early: bool = True, **deprecated: Unpack[bioimageio.core._resource_tests.DeprecatedKwargs]) -> Union[Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[bioimageio.spec.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[bioimageio.spec.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[bioimageio.spec.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[bioimageio.spec.NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[bioimageio.spec.NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[bioimageio.spec.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')], bioimageio.spec.InvalidDescr]:
565def load_description_and_test(
566    source: Union[ResourceDescr, PermissiveFileSource, BioimageioYamlContent],
567    *,
568    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
569    weight_format: Optional[SupportedWeightsFormat] = None,
570    devices: Optional[Sequence[str]] = None,
571    determinism: Literal["seed_only", "full"] = "seed_only",
572    expected_type: Optional[str] = None,
573    sha256: Optional[Sha256] = None,
574    stop_early: bool = True,
575    **deprecated: Unpack[DeprecatedKwargs],
576) -> Union[ResourceDescr, InvalidDescr]:
577    """Test a bioimage.io resource dynamically,
578    for example run prediction of test tensors for models.
579
580    See `test_description` for more details.
581
582    Returns:
583        A (possibly invalid) resource description object
584        with a populated `.validation_summary` attribute.
585    """
586    if isinstance(source, ResourceDescrBase):
587        root = source.root
588        file_name = source.file_name
589        if (
590            (
591                format_version
592                not in (
593                    DISCOVER,
594                    source.format_version,
595                    ".".join(source.format_version.split(".")[:2]),
596                )
597            )
598            or (c := source.validation_summary.details[0].context) is None
599            or not c.perform_io_checks
600        ):
601            logger.debug(
602                "deserializing source to ensure we validate and test using format {} and perform io checks",
603                format_version,
604            )
605            source = dump_description(source)
606    else:
607        root = Path()
608        file_name = None
609
610    if isinstance(source, ResourceDescrBase):
611        rd = source
612    elif isinstance(source, dict):
613        # check context for a given root; default to root of source
614        context = get_validation_context(
615            ValidationContext(root=root, file_name=file_name)
616        ).replace(
617            perform_io_checks=True  # make sure we perform io checks though
618        )
619
620        rd = build_description(
621            source,
622            format_version=format_version,
623            context=context,
624        )
625    else:
626        rd = load_description(
627            source, format_version=format_version, sha256=sha256, perform_io_checks=True
628        )
629
630    rd.validation_summary.env.add(
631        InstalledPackage(name="bioimageio.core", version=__version__)
632    )
633
634    if expected_type is not None:
635        _test_expected_resource_type(rd, expected_type)
636
637    if isinstance(rd, (v0_4.ModelDescr, v0_5.ModelDescr)):
638        if weight_format is None:
639            weight_formats: List[SupportedWeightsFormat] = [
640                w for w, we in rd.weights if we is not None
641            ]  # pyright: ignore[reportAssignmentType]
642        else:
643            weight_formats = [weight_format]
644
645        enable_determinism(determinism, weight_formats=weight_formats)
646        for w in weight_formats:
647            _test_model_inference(rd, w, devices, stop_early=stop_early, **deprecated)
648            if stop_early and rd.validation_summary.status == "failed":
649                break
650
651            if not isinstance(rd, v0_4.ModelDescr):
652                _test_model_inference_parametrized(
653                    rd, w, devices, stop_early=stop_early
654                )
655                if stop_early and rd.validation_summary.status == "failed":
656                    break
657
658    # TODO: add execution of jupyter notebooks
659    # TODO: add more tests
660
661    if rd.validation_summary.status == "valid-format":
662        rd.validation_summary.status = "passed"
663
664    return rd

Test a bioimage.io resource dynamically, for example run prediction of test tensors for models.

See test_description for more details.

Returns:

A (possibly invalid) resource description object with a populated .validation_summary attribute.

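A minimal sketch (hypothetical model source and weight format); the returned description carries the test results in its validation summary:

    from bioimageio.core import load_description_and_test

    rd = load_description_and_test(
        "path/to/model.zip",  # hypothetical source
        weight_format="pytorch_state_dict",  # assuming the model ships these weights
        determinism="seed_only",
    )
    print(rd.validation_summary.status)  # e.g. "passed" or "failed"
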
def load_description_and_validate_format_only( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, pydantic.networks.HttpUrl, zipfile.ZipFile], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', perform_io_checks: Optional[bool] = None, known_files: Optional[Dict[str, Optional[bioimageio.spec._internal.io_basics.Sha256]]] = None, sha256: Optional[bioimageio.spec._internal.io_basics.Sha256] = None) -> ValidationSummary:
232def load_description_and_validate_format_only(
233    source: Union[PermissiveFileSource, ZipFile],
234    /,
235    *,
236    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
237    perform_io_checks: Optional[bool] = None,
238    known_files: Optional[Dict[str, Optional[Sha256]]] = None,
239    sha256: Optional[Sha256] = None,
240) -> ValidationSummary:
241    """same as `load_description`, but only return the validation summary.
242
243    Returns:
244        Validation summary of the bioimage.io resource found at `source`.
245
246    """
247    rd = load_description(
248        source,
249        format_version=format_version,
250        perform_io_checks=perform_io_checks,
251        known_files=known_files,
252        sha256=sha256,
253    )
254    assert rd.validation_summary is not None
255    return rd.validation_summary

same as load_description, but only return the validation summary.

Returns:

Validation summary of the bioimage.io resource found at source.

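A minimal sketch (hypothetical source path); only static format validation is performed, no test tensors are evaluated:

    from bioimageio.core import load_description_and_validate_format_only

    summary = load_description_and_validate_format_only("path/to/rdf.yaml")
    print(summary.status)
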
def load_description( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, pydantic.networks.HttpUrl, zipfile.ZipFile], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', perform_io_checks: Optional[bool] = None, known_files: Optional[Dict[str, Optional[bioimageio.spec._internal.io_basics.Sha256]]] = None, sha256: Optional[bioimageio.spec._internal.io_basics.Sha256] = None) -> Union[Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[bioimageio.spec.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[bioimageio.spec.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[bioimageio.spec.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[bioimageio.spec.NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[bioimageio.spec.NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[bioimageio.spec.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')], bioimageio.spec.InvalidDescr]:
 57def load_description(
 58    source: Union[PermissiveFileSource, ZipFile],
 59    /,
 60    *,
 61    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
 62    perform_io_checks: Optional[bool] = None,
 63    known_files: Optional[Dict[str, Optional[Sha256]]] = None,
 64    sha256: Optional[Sha256] = None,
 65) -> Union[ResourceDescr, InvalidDescr]:
 66    """load a bioimage.io resource description
 67
 68    Args:
 69        source: Path or URL to an rdf.yaml or a bioimage.io package
 70                (zip-file with rdf.yaml in it).
 71        format_version: (optional) Use this argument to load the resource and
 72                        convert its metadata to a higher format_version.
 73        perform_io_checks: Whether or not to perform validation that requires file IO,
 74                           e.g. downloading remote files. The existence of local
 75                           absolute file paths is still being checked.
 76        known_files: Allows bypassing download and hashing of referenced files
 77                     (even if perform_io_checks is True).
 78                     Checked files will be added to this dictionary
 79                     with their SHA-256 value.
 80        sha256: Optional SHA-256 value of **source**
 81
 82    Returns:
 83        An object holding all metadata of the bioimage.io resource
 84
 85    """
 86    if isinstance(source, ResourceDescrBase):
 87        name = getattr(source, "name", f"{str(source)[:10]}...")
 88        logger.warning("returning already loaded description '{}' as is", name)
 89        return source  # pyright: ignore[reportReturnType]
 90
 91    opened = open_bioimageio_yaml(source, sha256=sha256)
 92
 93    context = get_validation_context().replace(
 94        root=opened.original_root,
 95        file_name=opened.original_file_name,
 96        perform_io_checks=perform_io_checks,
 97        known_files=known_files,
 98    )
 99
100    return build_description(
101        opened.content,
102        context=context,
103        format_version=format_version,
104    )

load a bioimage.io resource description

Arguments:
  • source: Path or URL to an rdf.yaml or a bioimage.io package (zip-file with rdf.yaml in it).
  • format_version: (optional) Use this argument to load the resource and convert its metadata to a higher format_version.
  • perform_io_checks: Whether or not to perform validation that requires file IO, e.g. downloading remote files. The existence of local absolute file paths is still being checked.
  • known_files: Allows bypassing download and hashing of referenced files (even if perform_io_checks is True). Checked files will be added to this dictionary with their SHA-256 value.
  • sha256: Optional SHA-256 value of source
Returns:

An object holding all metadata of the bioimage.io resource

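A minimal sketch, assuming a hypothetical source; since an InvalidDescr may be returned, checking the result is advisable:

    from bioimageio.core import load_description
    from bioimageio.spec import InvalidDescr

    rd = load_description("path/to/rdf.yaml")  # hypothetical path or URL
    if isinstance(rd, InvalidDescr):
        raise ValueError(f"invalid resource: {rd.validation_summary.status}")
    print(rd.type, rd.name)
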
def load_model_description( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, pydantic.networks.HttpUrl, zipfile.ZipFile], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', perform_io_checks: Optional[bool] = None, known_files: Optional[Dict[str, Optional[bioimageio.spec._internal.io_basics.Sha256]]] = None, sha256: Optional[bioimageio.spec._internal.io_basics.Sha256] = None) -> Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[bioimageio.spec.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')]:
131def load_model_description(
132    source: Union[PermissiveFileSource, ZipFile],
133    /,
134    *,
135    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
136    perform_io_checks: Optional[bool] = None,
137    known_files: Optional[Dict[str, Optional[Sha256]]] = None,
138    sha256: Optional[Sha256] = None,
139) -> AnyModelDescr:
140    """same as `load_description`, but additionally ensures that the loaded
141    description is valid and of type 'model'.
142
143    Raises:
144        ValueError: for invalid or non-model resources
145    """
146    rd = load_description(
147        source,
148        format_version=format_version,
149        perform_io_checks=perform_io_checks,
150        known_files=known_files,
151        sha256=sha256,
152    )
153    return ensure_description_is_model(rd)

same as load_description, but additionally ensures that the loaded description is valid and of type 'model'.

Raises:
  • ValueError: for invalid or non-model resources
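
A minimal sketch (hypothetical source); unlike load_description, this raises for invalid or non-model resources:

    from bioimageio.core import load_model_description

    model = load_model_description("path/to/model.zip")  # hypothetical path
    print(model.name, model.format_version)
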
def load_model( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, pydantic.networks.HttpUrl, zipfile.ZipFile], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', perform_io_checks: Optional[bool] = None, known_files: Optional[Dict[str, Optional[bioimageio.spec._internal.io_basics.Sha256]]] = None, sha256: Optional[bioimageio.spec._internal.io_basics.Sha256] = None) -> Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[bioimageio.spec.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')]:
131def load_model_description(
132    source: Union[PermissiveFileSource, ZipFile],
133    /,
134    *,
135    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
136    perform_io_checks: Optional[bool] = None,
137    known_files: Optional[Dict[str, Optional[Sha256]]] = None,
138    sha256: Optional[Sha256] = None,
139) -> AnyModelDescr:
140    """same as `load_description`, but additionally ensures that the loaded
141    description is valid and of type 'model'.
142
143    Raises:
144        ValueError: for invalid or non-model resources
145    """
146    rd = load_description(
147        source,
148        format_version=format_version,
149        perform_io_checks=perform_io_checks,
150        known_files=known_files,
151        sha256=sha256,
152    )
153    return ensure_description_is_model(rd)
def load_resource( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, pydantic.networks.HttpUrl, zipfile.ZipFile], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', perform_io_checks: Optional[bool] = None, known_files: Optional[Dict[str, Optional[bioimageio.spec._internal.io_basics.Sha256]]] = None, sha256: Optional[bioimageio.spec._internal.io_basics.Sha256] = None) -> Union[Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[bioimageio.spec.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[bioimageio.spec.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[bioimageio.spec.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[bioimageio.spec.NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[bioimageio.spec.NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[bioimageio.spec.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')], bioimageio.spec.InvalidDescr]:
 57def load_description(
 58    source: Union[PermissiveFileSource, ZipFile],
 59    /,
 60    *,
 61    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
 62    perform_io_checks: Optional[bool] = None,
 63    known_files: Optional[Dict[str, Optional[Sha256]]] = None,
 64    sha256: Optional[Sha256] = None,
 65) -> Union[ResourceDescr, InvalidDescr]:
 66    """load a bioimage.io resource description
 67
 68    Args:
 69        source: Path or URL to an rdf.yaml or a bioimage.io package
 70                (zip-file with rdf.yaml in it).
 71        format_version: (optional) Use this argument to load the resource and
 72                        convert its metadata to a higher format_version.
 73        perform_io_checks: Whether or not to perform validation that requires file IO,
 74                           e.g. downloading remote files. The existence of local
 75                           absolute file paths is still being checked.
 76        known_files: Allows bypassing download and hashing of referenced files
 77                     (even if perform_io_checks is True).
 78                     Checked files will be added to this dictionary
 79                     with their SHA-256 value.
 80        sha256: Optional SHA-256 value of **source**
 81
 82    Returns:
 83        An object holding all metadata of the bioimage.io resource
 84
 85    """
 86    if isinstance(source, ResourceDescrBase):
 87        name = getattr(source, "name", f"{str(source)[:10]}...")
 88        logger.warning("returning already loaded description '{}' as is", name)
 89        return source  # pyright: ignore[reportReturnType]
 90
 91    opened = open_bioimageio_yaml(source, sha256=sha256)
 92
 93    context = get_validation_context().replace(
 94        root=opened.original_root,
 95        file_name=opened.original_file_name,
 96        perform_io_checks=perform_io_checks,
 97        known_files=known_files,
 98    )
 99
100    return build_description(
101        opened.content,
102        context=context,
103        format_version=format_version,
104    )
def predict_many( *, model: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, pydantic.networks.HttpUrl, bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr, PredictionPipeline], inputs: Union[Iterable[Mapping[bioimageio.spec.model.v0_5.TensorId, Union[Tensor, xarray.core.dataarray.DataArray, numpy.ndarray[Any, numpy.dtype[Any]], pathlib.Path]]], Iterable[Union[Tensor, xarray.core.dataarray.DataArray, numpy.ndarray[Any, numpy.dtype[Any]], pathlib.Path]]], sample_id: str = 'sample{i:03}', blocksize_parameter: Union[int, Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, AxisId], int], NoneType] = None, skip_preprocessing: bool = False, skip_postprocessing: bool = False, save_output_path: Union[pathlib.Path, str, NoneType] = None) -> Iterator[Sample]:
131def predict_many(
132    *,
133    model: Union[
134        PermissiveFileSource, v0_4.ModelDescr, v0_5.ModelDescr, PredictionPipeline
135    ],
136    inputs: Union[Iterable[PerMember[TensorSource]], Iterable[TensorSource]],
137    sample_id: str = "sample{i:03}",
138    blocksize_parameter: Optional[
139        Union[
140            v0_5.ParameterizedSize_N,
141            Mapping[Tuple[MemberId, AxisId], v0_5.ParameterizedSize_N],
142        ]
143    ] = None,
144    skip_preprocessing: bool = False,
145    skip_postprocessing: bool = False,
146    save_output_path: Optional[Union[Path, str]] = None,
147) -> Iterator[Sample]:
148    """Run prediction for multiple sets of inputs with a bioimage.io model
149
150    Args:
151        model: Model to predict with.
152            May be given as RDF source, model description or prediction pipeline.
153        inputs: An iterable of the named input(s) for this model as a dictionary.
154        sample_id: The sample id.
155            Note: `{i}` will be formatted with the index of the sample.
156            If `{i}` (or `{i:`) is not present and `inputs` is an iterable (not a
157            single mapping), `{i:03}` is appended.
158        blocksize_parameter: (optional) Tile the input into blocks parametrized by
159            blocksize according to any parametrized axis sizes defined in the model RDF.
160        skip_preprocessing: Flag to skip the model's preprocessing.
161        skip_postprocessing: Flag to skip the model's postprocessing.
162        save_output_path: A path to save the output to.
163            Must contain:
164            - `{sample_id}` to differentiate predicted samples
165            - `{output_id}` (or `{member_id}`) if the model has multiple outputs
166    """
167    if save_output_path is not None and "{sample_id}" not in str(save_output_path):
168        raise ValueError(
169            f"Missing `{{sample_id}}` in save_output_path={save_output_path}"
170            + " to differentiate predicted samples."
171        )
172
173    if isinstance(model, PredictionPipeline):
174        pp = model
175    else:
176        if not isinstance(model, (v0_4.ModelDescr, v0_5.ModelDescr)):
177            loaded = load_description(model)
178            if not isinstance(loaded, (v0_4.ModelDescr, v0_5.ModelDescr)):
179                raise ValueError(f"expected model description, but got {loaded}")
180            model = loaded
181
182        pp = create_prediction_pipeline(model)
183
184    if not isinstance(inputs, collections.abc.Mapping):
185        if "{i}" not in sample_id and "{i:" not in sample_id:
186            sample_id += "{i:03}"
187
188        total = len(inputs) if isinstance(inputs, collections.abc.Sized) else None
189
190        for i, ipts in tqdm(enumerate(inputs), total=total):
191            yield predict(
192                model=pp,
193                inputs=ipts,
194                sample_id=sample_id.format(i=i),
195                blocksize_parameter=blocksize_parameter,
196                skip_preprocessing=skip_preprocessing,
197                skip_postprocessing=skip_postprocessing,
198                save_output_path=save_output_path,
199            )

Run prediction for multiple sets of inputs with a bioimage.io model

Arguments:
  • model: Model to predict with. May be given as RDF source, model description or prediction pipeline.
  • inputs: An iterable of the named input(s) for this model as a dictionary.
  • sample_id: The sample id. Note: {i} will be formatted with the index of the sample. If {i} (or {i:) is not present and inputs is an iterable (not a single mapping), {i:03} is appended.
  • blocksize_parameter: (optional) Tile the input into blocks parametrized by blocksize according to any parametrized axis sizes defined in the model RDF.
  • skip_preprocessing: Flag to skip the model's preprocessing.
  • skip_postprocessing: Flag to skip the model's postprocessing.
  • save_output_path: A path to save the output to. Must contain:
    • {sample_id} to differentiate predicted samples
    • {output_id} (or {member_id}) if the model has multiple outputs
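
A minimal sketch, assuming a hypothetical single-input model and input arrays shaped to fit it:

    import numpy as np
    from bioimageio.core import predict_many

    inputs = [np.zeros((1, 1, 64, 64), dtype="float32") for _ in range(3)]  # hypothetical shapes
    for sample in predict_many(
        model="path/to/model.zip",  # hypothetical source
        inputs=inputs,
        save_output_path="outputs/{sample_id}.tif",  # must contain {sample_id}
    ):
        print(sample.id)
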
def predict( *, model: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, pydantic.networks.HttpUrl, bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr, PredictionPipeline], inputs: Union[Sample, Mapping[bioimageio.spec.model.v0_5.TensorId, Union[Tensor, xarray.core.dataarray.DataArray, numpy.ndarray[Any, numpy.dtype[Any]], pathlib.Path]], Tensor, xarray.core.dataarray.DataArray, numpy.ndarray[Any, numpy.dtype[Any]], pathlib.Path], sample_id: Hashable = 'sample', blocksize_parameter: Union[int, Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, AxisId], int], NoneType] = None, input_block_shape: Optional[Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[AxisId, int]]] = None, skip_preprocessing: bool = False, skip_postprocessing: bool = False, save_output_path: Union[pathlib.Path, str, NoneType] = None) -> Sample:
 29def predict(
 30    *,
 31    model: Union[
 32        PermissiveFileSource, v0_4.ModelDescr, v0_5.ModelDescr, PredictionPipeline
 33    ],
 34    inputs: Union[Sample, PerMember[TensorSource], TensorSource],
 35    sample_id: Hashable = "sample",
 36    blocksize_parameter: Optional[BlocksizeParameter] = None,
 37    input_block_shape: Optional[Mapping[MemberId, Mapping[AxisId, int]]] = None,
 38    skip_preprocessing: bool = False,
 39    skip_postprocessing: bool = False,
 40    save_output_path: Optional[Union[Path, str]] = None,
 41) -> Sample:
 42    """Run prediction for a single set of input(s) with a bioimage.io model
 43
 44    Args:
 45        model: Model to predict with.
 46            May be given as RDF source, model description or prediction pipeline.
 47        inputs: the input sample or the named input(s) for this model as a dictionary
 48        sample_id: the sample id.
 49            The **sample_id** is used to format **save_output_path**
 50            and to distinguish sample specific log messages.
 51        blocksize_parameter: (optional) Tile the input into blocks parametrized by
 52            **blocksize_parameter** according to any parametrized axis sizes defined
 53            by the **model**.
 54            See `bioimageio.spec.model.v0_5.ParameterizedSize` for details.
 55            Note: For a predetermined, fixed block shape use **input_block_shape**.
 56        input_block_shape: (optional) Tile the input sample tensors into blocks.
 57            Note: Use **blocksize_parameter** for a parameterized block shape to
 58                run prediction independent of the exact block shape.
 59        skip_preprocessing: Flag to skip the model's preprocessing.
 60        skip_postprocessing: Flag to skip the model's postprocessing.
 61        save_output_path: A path to save the output to.
 62            Must contain:
 63            - `{output_id}` (or `{member_id}`) if the model has multiple output tensors
 64            May contain:
 65            - `{sample_id}` to avoid overwriting outputs of repeated calls
 66    """
 67    if isinstance(model, PredictionPipeline):
 68        pp = model
 69        model = pp.model_description
 70    else:
 71        if not isinstance(model, (v0_4.ModelDescr, v0_5.ModelDescr)):
 72            loaded = load_description(model)
 73            if not isinstance(loaded, (v0_4.ModelDescr, v0_5.ModelDescr)):
 74                raise ValueError(f"expected model description, but got {loaded}")
 75            model = loaded
 76
 77        pp = create_prediction_pipeline(model)
 78
 79    if save_output_path is not None:
 80        if (
 81            "{output_id}" not in str(save_output_path)
 82            and "{member_id}" not in str(save_output_path)
 83            and len(model.outputs) > 1
 84        ):
 85            raise ValueError(
 86                f"Missing `{{output_id}}` in save_output_path={save_output_path} to "
 87                + "distinguish model outputs "
 88                + str([get_member_id(d) for d in model.outputs])
 89            )
 90
 91    if isinstance(inputs, Sample):
 92        sample = inputs
 93    else:
 94        sample = create_sample_for_model(
 95            pp.model_description, inputs=inputs, sample_id=sample_id
 96        )
 97
 98    if input_block_shape is not None:
 99        if blocksize_parameter is not None:
100            logger.warning(
101                "ignoring blocksize_parameter={} in favor of input_block_shape={}",
102                blocksize_parameter,
103                input_block_shape,
104            )
105
106        output = pp.predict_sample_with_fixed_blocking(
107            sample,
108            input_block_shape=input_block_shape,
109            skip_preprocessing=skip_preprocessing,
110            skip_postprocessing=skip_postprocessing,
111        )
112    elif blocksize_parameter is not None:
113        output = pp.predict_sample_with_blocking(
114            sample,
115            skip_preprocessing=skip_preprocessing,
116            skip_postprocessing=skip_postprocessing,
117            ns=blocksize_parameter,
118        )
119    else:
120        output = pp.predict_sample_without_blocking(
121            sample,
122            skip_preprocessing=skip_preprocessing,
123            skip_postprocessing=skip_postprocessing,
124        )
125    if save_output_path:
126        save_sample(save_output_path, output)
127
128    return output

Run prediction for a single set of input(s) with a bioimage.io model

Arguments:
  • model: Model to predict with. May be given as RDF source, model description or prediction pipeline.
  • inputs: the input sample or the named input(s) for this model as a dictionary
  • sample_id: the sample id. The sample_id is used to format save_output_path and to distinguish sample specific log messages.
  • blocksize_parameter: (optional) Tile the input into blocks parametrized by blocksize_parameter according to any parametrized axis sizes defined by the model. See bioimageio.spec.model.v0_5.ParameterizedSize for details. Note: For a predetermined, fixed block shape use input_block_shape.
  • input_block_shape: (optional) Tile the input sample tensors into blocks. Note: Use blocksize_parameter for a parameterized block shape to run prediction independent of the exact block shape.
  • skip_preprocessing: Flag to skip the model's preprocessing.
  • skip_postprocessing: Flag to skip the model's postprocessing.
  • save_output_path: A path to save the output to. Must contain:
    • {output_id} (or {member_id}) if the model has multiple output tensors
    May contain:
    • {sample_id} to avoid overwriting outputs of repeated calls
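
A minimal sketch, assuming a hypothetical single-input model and a fitting numpy array:

    import numpy as np
    from bioimageio.core import predict

    out = predict(
        model="path/to/model.zip",  # hypothetical source
        inputs=np.zeros((1, 1, 128, 128), dtype="float32"),  # hypothetical shape
        sample_id="demo",
    )
    print(out.shape)  # per-member, per-axis sizes of the output sample
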
class PredictionPipeline:
 51class PredictionPipeline:
 52    """
 53    Represents model computation including preprocessing and postprocessing
 54    Note: Ideally use the `PredictionPipeline` in a with statement
 55        (as a context manager).
 56    """
 57
 58    def __init__(
 59        self,
 60        *,
 61        name: str,
 62        model_description: AnyModelDescr,
 63        preprocessing: List[Processing],
 64        postprocessing: List[Processing],
 65        model_adapter: ModelAdapter,
 66        default_ns: Optional[BlocksizeParameter] = None,
 67        default_blocksize_parameter: BlocksizeParameter = 10,
 68        default_batch_size: int = 1,
 69    ) -> None:
 70        """Use `create_prediction_pipeline` to create a `PredictionPipeline`"""
 71        super().__init__()
 72        default_blocksize_parameter = default_ns or default_blocksize_parameter
 73        if default_ns is not None:
 74            warnings.warn(
 75                "Argument `default_ns` is deprecated in favor of"
 76                + " `default_blocksize_parameter` and will be removed soon."
 77            )
 78        del default_ns
 79
 80        if model_description.run_mode:
 81            warnings.warn(
 82                f"Not yet implemented inference for run mode '{model_description.run_mode.name}'"
 83            )
 84
 85        self.name = name
 86        self._preprocessing = preprocessing
 87        self._postprocessing = postprocessing
 88
 89        self.model_description = model_description
 90        if isinstance(model_description, v0_4.ModelDescr):
 91            self._default_input_halo: PerMember[PerAxis[Halo]] = {}
 92            self._block_transform = None
 93        else:
 94            default_output_halo = {
 95                t.id: {
 96                    a.id: Halo(a.halo, a.halo)
 97                    for a in t.axes
 98                    if isinstance(a, v0_5.WithHalo)
 99                }
100                for t in model_description.outputs
101            }
102            self._default_input_halo = get_input_halo(
103                model_description, default_output_halo
104            )
105            self._block_transform = get_block_transform(model_description)
106
107        self._default_blocksize_parameter = default_blocksize_parameter
108        self._default_batch_size = default_batch_size
109
110        self._input_ids = get_member_ids(model_description.inputs)
111        self._output_ids = get_member_ids(model_description.outputs)
112
113        self._adapter: ModelAdapter = model_adapter
114
115    def __enter__(self):
116        self.load()
117        return self
118
119    def __exit__(self, exc_type, exc_val, exc_tb):  # type: ignore
120        self.unload()
121        return False
122
123    def predict_sample_block(
124        self,
125        sample_block: SampleBlockWithOrigin,
126        skip_preprocessing: bool = False,
127        skip_postprocessing: bool = False,
128    ) -> SampleBlock:
129        if isinstance(self.model_description, v0_4.ModelDescr):
130            raise NotImplementedError(
131                f"predict_sample_block not implemented for model {self.model_description.format_version}"
132            )
133        else:
134            assert self._block_transform is not None
135
136        if not skip_preprocessing:
137            self.apply_preprocessing(sample_block)
138
139        output_meta = sample_block.get_transformed_meta(self._block_transform)
140        local_output = self._adapter.forward(sample_block)
141
142        output = output_meta.with_data(local_output.members, stat=local_output.stat)
143        if not skip_postprocessing:
144            self.apply_postprocessing(output)
145
146        return output
147
148    def predict_sample_without_blocking(
149        self,
150        sample: Sample,
151        skip_preprocessing: bool = False,
152        skip_postprocessing: bool = False,
153    ) -> Sample:
154        """predict a sample.
155        The sample's tensor shapes have to match the model's input tensor description.
156        If that is not the case, consider `predict_sample_with_blocking`"""
157
158        if not skip_preprocessing:
159            self.apply_preprocessing(sample)
160
161        output = self._adapter.forward(sample)
162        if not skip_postprocessing:
163            self.apply_postprocessing(output)
164
165        return output
166
167    def get_output_sample_id(self, input_sample_id: SampleId):
168        warnings.warn(
169            "`PredictionPipeline.get_output_sample_id()` is deprecated and will be"
170            + " removed soon. Output sample id is equal to input sample id, hence this"
171            + " function is not needed."
172        )
173        return input_sample_id
174
175    def predict_sample_with_fixed_blocking(
176        self,
177        sample: Sample,
178        input_block_shape: Mapping[MemberId, Mapping[AxisId, int]],
179        *,
180        skip_preprocessing: bool = False,
181        skip_postprocessing: bool = False,
182    ) -> Sample:
183        if not skip_preprocessing:
184            self.apply_preprocessing(sample)
185
186        n_blocks, input_blocks = sample.split_into_blocks(
187            input_block_shape,
188            halo=self._default_input_halo,
189            pad_mode="reflect",
190        )
191        input_blocks = list(input_blocks)
192        predicted_blocks: List[SampleBlock] = []
193        logger.info(
194            "split sample shape {} into {} blocks of {}.",
195            {k: dict(v) for k, v in sample.shape.items()},
196            n_blocks,
197            {k: dict(v) for k, v in input_block_shape.items()},
198        )
199        for b in tqdm(
200            input_blocks,
201            desc=f"predict {sample.id or ''} with {self.model_description.id or self.model_description.name}",
202            unit="block",
203            unit_divisor=1,
204            total=n_blocks,
205        ):
206            predicted_blocks.append(
207                self.predict_sample_block(
208                    b, skip_preprocessing=True, skip_postprocessing=True
209                )
210            )
211
212        predicted_sample = Sample.from_blocks(predicted_blocks)
213        if not skip_postprocessing:
214            self.apply_postprocessing(predicted_sample)
215
216        return predicted_sample
217
218    def predict_sample_with_blocking(
219        self,
220        sample: Sample,
221        skip_preprocessing: bool = False,
222        skip_postprocessing: bool = False,
223        ns: Optional[
224            Union[
225                v0_5.ParameterizedSize_N,
226                Mapping[Tuple[MemberId, AxisId], v0_5.ParameterizedSize_N],
227            ]
228        ] = None,
229        batch_size: Optional[int] = None,
230    ) -> Sample:
231        """predict a sample by splitting it into blocks according to the model and the `ns` parameter"""
232
233        if isinstance(self.model_description, v0_4.ModelDescr):
234            raise NotImplementedError(
235                "`predict_sample_with_blocking` not implemented for v0_4.ModelDescr"
236                + f" {self.model_description.name}."
237                + " Consider using `predict_sample_with_fixed_blocking`"
238            )
239
240        ns = ns or self._default_blocksize_parameter
241        if isinstance(ns, int):
242            ns = {
243                (ipt.id, a.id): ns
244                for ipt in self.model_description.inputs
245                for a in ipt.axes
246                if isinstance(a.size, v0_5.ParameterizedSize)
247            }
248        input_block_shape = self.model_description.get_tensor_sizes(
249            ns, batch_size or self._default_batch_size
250        ).inputs
251
252        return self.predict_sample_with_fixed_blocking(
253            sample,
254            input_block_shape=input_block_shape,
255            skip_preprocessing=skip_preprocessing,
256            skip_postprocessing=skip_postprocessing,
257        )
258
259    # def predict(
260    #     self,
261    #     inputs: Predict_IO,
262    #     skip_preprocessing: bool = False,
263    #     skip_postprocessing: bool = False,
264    # ) -> Predict_IO:
265    #     """Run model prediction **including** pre/postprocessing."""
266
267    #     if isinstance(inputs, Sample):
268    #         return self.predict_sample_with_blocking(
269    #             inputs,
270    #             skip_preprocessing=skip_preprocessing,
271    #             skip_postprocessing=skip_postprocessing,
272    #         )
273    #     elif isinstance(inputs, collections.abc.Iterable):
274    #         return (
275    #             self.predict(
276    #                 ipt,
277    #                 skip_preprocessing=skip_preprocessing,
278    #                 skip_postprocessing=skip_postprocessing,
279    #             )
280    #             for ipt in inputs
281    #         )
282    #     else:
283    #         assert_never(inputs)
284
285    def apply_preprocessing(self, sample: Union[Sample, SampleBlockWithOrigin]) -> None:
286        """apply preprocessing in-place, also updates sample stats"""
287        for op in self._preprocessing:
288            op(sample)
289
290    def apply_postprocessing(
291        self, sample: Union[Sample, SampleBlock, SampleBlockWithOrigin]
292    ) -> None:
293        """apply postprocessing in-place, also updates samples stats"""
294        for op in self._postprocessing:
295            if isinstance(sample, (Sample, SampleBlockWithOrigin)):
296                op(sample)
297            elif not isinstance(op, BlockedOperator):
298                raise NotImplementedError(
299                    "block wise update of output statistics not yet implemented"
300                )
301            else:
302                op(sample)
303
304    def load(self):
305        """
306        optional step: load model onto devices before calling forward if not using it as context manager
307        """
308        pass
309
310    def unload(self):
311        """
312        free any device memory in use
313        """
314        self._adapter.unload()

Represents model computation including preprocessing and postprocessing. Note: Ideally use the PredictionPipeline in a with statement (as a context manager).

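A minimal sketch of the recommended context-manager usage (hypothetical model source):

    import numpy as np
    from bioimageio.core import create_prediction_pipeline, load_model_description, predict

    model = load_model_description("path/to/model.zip")  # hypothetical path
    with create_prediction_pipeline(model) as pp:
        # the loaded pipeline can be passed directly to `predict`
        out = predict(model=pp, inputs=np.zeros((1, 1, 64, 64), dtype="float32"))
    print(out.shape)
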
PredictionPipeline( *, name: str, model_description: Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[bioimageio.spec.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], preprocessing: List[Union[bioimageio.core.proc_ops.AddKnownDatasetStats, bioimageio.core.proc_ops.Binarize, bioimageio.core.proc_ops.Clip, bioimageio.core.proc_ops.EnsureDtype, bioimageio.core.proc_ops.FixedZeroMeanUnitVariance, bioimageio.core.proc_ops.ScaleLinear, bioimageio.core.proc_ops.ScaleMeanVariance, bioimageio.core.proc_ops.ScaleRange, bioimageio.core.proc_ops.Sigmoid, bioimageio.core.proc_ops.Softmax, bioimageio.core.proc_ops.UpdateStats, bioimageio.core.proc_ops.ZeroMeanUnitVariance]], postprocessing: List[Union[bioimageio.core.proc_ops.AddKnownDatasetStats, bioimageio.core.proc_ops.Binarize, bioimageio.core.proc_ops.Clip, bioimageio.core.proc_ops.EnsureDtype, bioimageio.core.proc_ops.FixedZeroMeanUnitVariance, bioimageio.core.proc_ops.ScaleLinear, bioimageio.core.proc_ops.ScaleMeanVariance, bioimageio.core.proc_ops.ScaleRange, bioimageio.core.proc_ops.Sigmoid, bioimageio.core.proc_ops.Softmax, bioimageio.core.proc_ops.UpdateStats, bioimageio.core.proc_ops.ZeroMeanUnitVariance]], model_adapter: bioimageio.core.backends._model_adapter.ModelAdapter, default_ns: Union[int, Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, AxisId], int], NoneType] = None, default_blocksize_parameter: Union[int, Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, AxisId], int]] = 10, default_batch_size: int = 1)
 58    def __init__(
 59        self,
 60        *,
 61        name: str,
 62        model_description: AnyModelDescr,
 63        preprocessing: List[Processing],
 64        postprocessing: List[Processing],
 65        model_adapter: ModelAdapter,
 66        default_ns: Optional[BlocksizeParameter] = None,
 67        default_blocksize_parameter: BlocksizeParameter = 10,
 68        default_batch_size: int = 1,
 69    ) -> None:
 70        """Use `create_prediction_pipeline` to create a `PredictionPipeline`"""
 71        super().__init__()
 72        default_blocksize_parameter = default_ns or default_blocksize_parameter
 73        if default_ns is not None:
 74            warnings.warn(
 75                "Argument `default_ns` is deprecated in favor of"
 76                + " `default_blocksize_parameter` and will be removed soon."
 77            )
 78        del default_ns
 79
 80        if model_description.run_mode:
 81            warnings.warn(
 82                f"Not yet implemented inference for run mode '{model_description.run_mode.name}'"
 83            )
 84
 85        self.name = name
 86        self._preprocessing = preprocessing
 87        self._postprocessing = postprocessing
 88
 89        self.model_description = model_description
 90        if isinstance(model_description, v0_4.ModelDescr):
 91            self._default_input_halo: PerMember[PerAxis[Halo]] = {}
 92            self._block_transform = None
 93        else:
 94            default_output_halo = {
 95                t.id: {
 96                    a.id: Halo(a.halo, a.halo)
 97                    for a in t.axes
 98                    if isinstance(a, v0_5.WithHalo)
 99                }
100                for t in model_description.outputs
101            }
102            self._default_input_halo = get_input_halo(
103                model_description, default_output_halo
104            )
105            self._block_transform = get_block_transform(model_description)
106
107        self._default_blocksize_parameter = default_blocksize_parameter
108        self._default_batch_size = default_batch_size
109
110        self._input_ids = get_member_ids(model_description.inputs)
111        self._output_ids = get_member_ids(model_description.outputs)
112
113        self._adapter: ModelAdapter = model_adapter
name
model_description
def predict_sample_block( self, sample_block: bioimageio.core.sample.SampleBlockWithOrigin, skip_preprocessing: bool = False, skip_postprocessing: bool = False) -> bioimageio.core.sample.SampleBlock:
123    def predict_sample_block(
124        self,
125        sample_block: SampleBlockWithOrigin,
126        skip_preprocessing: bool = False,
127        skip_postprocessing: bool = False,
128    ) -> SampleBlock:
129        if isinstance(self.model_description, v0_4.ModelDescr):
130            raise NotImplementedError(
131                f"predict_sample_block not implemented for model {self.model_description.format_version}"
132            )
133        else:
134            assert self._block_transform is not None
135
136        if not skip_preprocessing:
137            self.apply_preprocessing(sample_block)
138
139        output_meta = sample_block.get_transformed_meta(self._block_transform)
140        local_output = self._adapter.forward(sample_block)
141
142        output = output_meta.with_data(local_output.members, stat=local_output.stat)
143        if not skip_postprocessing:
144            self.apply_postprocessing(output)
145
146        return output
def predict_sample_without_blocking( self, sample: Sample, skip_preprocessing: bool = False, skip_postprocessing: bool = False) -> Sample:
148    def predict_sample_without_blocking(
149        self,
150        sample: Sample,
151        skip_preprocessing: bool = False,
152        skip_postprocessing: bool = False,
153    ) -> Sample:
154        """predict a sample.
155        The sample's tensor shapes have to match the model's input tensor description.
156        If that is not the case, consider `predict_sample_with_blocking`"""
157
158        if not skip_preprocessing:
159            self.apply_preprocessing(sample)
160
161        output = self._adapter.forward(sample)
162        if not skip_postprocessing:
163            self.apply_postprocessing(output)
164
165        return output

predict a sample. The sample's tensor shapes have to match the model's input tensor description. If that is not the case, consider predict_sample_with_blocking.

def get_output_sample_id(self, input_sample_id: Hashable):
167    def get_output_sample_id(self, input_sample_id: SampleId):
168        warnings.warn(
169            "`PredictionPipeline.get_output_sample_id()` is deprecated and will be"
170            + " removed soon. Output sample id is equal to input sample id, hence this"
171            + " function is not needed."
172        )
173        return input_sample_id
def predict_sample_with_fixed_blocking( self, sample: Sample, input_block_shape: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[AxisId, int]], *, skip_preprocessing: bool = False, skip_postprocessing: bool = False) -> Sample:
175    def predict_sample_with_fixed_blocking(
176        self,
177        sample: Sample,
178        input_block_shape: Mapping[MemberId, Mapping[AxisId, int]],
179        *,
180        skip_preprocessing: bool = False,
181        skip_postprocessing: bool = False,
182    ) -> Sample:
183        if not skip_preprocessing:
184            self.apply_preprocessing(sample)
185
186        n_blocks, input_blocks = sample.split_into_blocks(
187            input_block_shape,
188            halo=self._default_input_halo,
189            pad_mode="reflect",
190        )
191        input_blocks = list(input_blocks)
192        predicted_blocks: List[SampleBlock] = []
193        logger.info(
194            "split sample shape {} into {} blocks of {}.",
195            {k: dict(v) for k, v in sample.shape.items()},
196            n_blocks,
197            {k: dict(v) for k, v in input_block_shape.items()},
198        )
199        for b in tqdm(
200            input_blocks,
201            desc=f"predict {sample.id or ''} with {self.model_description.id or self.model_description.name}",
202            unit="block",
203            unit_divisor=1,
204            total=n_blocks,
205        ):
206            predicted_blocks.append(
207                self.predict_sample_block(
208                    b, skip_preprocessing=True, skip_postprocessing=True
209                )
210            )
211
212        predicted_sample = Sample.from_blocks(predicted_blocks)
213        if not skip_postprocessing:
214            self.apply_postprocessing(predicted_sample)
215
216        return predicted_sample
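Continuing the sketch above (with pp, sample, and input_id as defined there), a hedged example of fixed blocking; the block sizes are arbitrary and must be valid for the model:

    from bioimageio.core import AxisId

    output = pp.predict_sample_with_fixed_blocking(
        sample,
        input_block_shape={
            input_id: {AxisId("b"): 1, AxisId("c"): 1, AxisId("y"): 128, AxisId("x"): 128}
        },
    )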
def predict_sample_with_blocking( self, sample: Sample, skip_preprocessing: bool = False, skip_postprocessing: bool = False, ns: Union[int, Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, AxisId], int], NoneType] = None, batch_size: Optional[int] = None) -> Sample:
218    def predict_sample_with_blocking(
219        self,
220        sample: Sample,
221        skip_preprocessing: bool = False,
222        skip_postprocessing: bool = False,
223        ns: Optional[
224            Union[
225                v0_5.ParameterizedSize_N,
226                Mapping[Tuple[MemberId, AxisId], v0_5.ParameterizedSize_N],
227            ]
228        ] = None,
229        batch_size: Optional[int] = None,
230    ) -> Sample:
231        """predict a sample by splitting it into blocks according to the model and the `ns` parameter"""
232
233        if isinstance(self.model_description, v0_4.ModelDescr):
234            raise NotImplementedError(
235                "`predict_sample_with_blocking` not implemented for v0_4.ModelDescr"
236                + f" {self.model_description.name}."
237                + " Consider using `predict_sample_with_fixed_blocking`"
238            )
239
240        ns = ns or self._default_blocksize_parameter
241        if isinstance(ns, int):
242            ns = {
243                (ipt.id, a.id): ns
244                for ipt in self.model_description.inputs
245                for a in ipt.axes
246                if isinstance(a.size, v0_5.ParameterizedSize)
247            }
248        input_block_shape = self.model_description.get_tensor_sizes(
249            ns, batch_size or self._default_batch_size
250        ).inputs
251
252        return self.predict_sample_with_fixed_blocking(
253            sample,
254            input_block_shape=input_block_shape,
255            skip_preprocessing=skip_preprocessing,
256            skip_postprocessing=skip_postprocessing,
257        )

predict a sample by splitting it into blocks according to the model and the ns parameter.
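Continuing the same sketch, ns may be a single integer applied to every parameterized axis, or a mapping per (member, axis) pair:

    # uniform scaling of all parameterized axes (illustrative value)
    output = pp.predict_sample_with_blocking(sample, ns=10, batch_size=1)

    # equivalent per-axis form (hypothetical axis ids):
    # ns={(input_id, AxisId("y")): 10, (input_id, AxisId("x")): 10}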

def apply_preprocessing( self, sample: Union[Sample, bioimageio.core.sample.SampleBlockWithOrigin]) -> None:
285    def apply_preprocessing(self, sample: Union[Sample, SampleBlockWithOrigin]) -> None:
286        """apply preprocessing in-place, also updates sample stats"""
287        for op in self._preprocessing:
288            op(sample)

apply preprocessing in-place, also updates sample stats

def apply_postprocessing( self, sample: Union[Sample, bioimageio.core.sample.SampleBlock, bioimageio.core.sample.SampleBlockWithOrigin]) -> None:
290    def apply_postprocessing(
291        self, sample: Union[Sample, SampleBlock, SampleBlockWithOrigin]
292    ) -> None:
293        """apply postprocessing in-place, also updates sample stats"""
294        for op in self._postprocessing:
295            if isinstance(sample, (Sample, SampleBlockWithOrigin)):
296                op(sample)
297            elif not isinstance(op, BlockedOperator):
298                raise NotImplementedError(
299                    "block wise update of output statistics not yet implemented"
300                )
301            else:
302                op(sample)

apply postprocessing in-place, also updates sample stats
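Both hooks can also be driven manually, e.g. to inspect the raw model output; a sketch reusing pp and sample from above:

    pp.apply_preprocessing(sample)  # in-place; also updates sample.stat
    raw = pp.predict_sample_without_blocking(
        sample, skip_preprocessing=True, skip_postprocessing=True
    )
    pp.apply_postprocessing(raw)  # in-place on the output sample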

def load(self):
304    def load(self):
305        """
306        optional step: load the model onto devices before calling forward, if not using the pipeline as a context manager
307        """
308        pass

optional step: load the model onto devices before calling forward, if not using the pipeline as a context manager

def unload(self):
310    def unload(self):
311        """
312        free any device memory in use
313        """
314        self._adapter.unload()

free any device memory in use
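A sketch of both resource-management styles; the context-manager form assumes __enter__/__exit__ delegate to load()/unload(), as the docstrings above indicate:

    with create_prediction_pipeline(model) as pp:
        output = pp.predict_sample_without_blocking(sample)

    # or manually:
    pp = create_prediction_pipeline(model)
    pp.load()  # optional: load the model onto device(s) up front
    try:
        output = pp.predict_sample_without_blocking(sample)
    finally:
        pp.unload()  # free device memory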

@dataclass
class Sample:
 46@dataclass
 47class Sample:
 48    """A dataset sample.
 49
 50    A `Sample` has `members`, which allows combining multiple tensors into a single
 51    sample.
 52    For example, a `Sample` from a dataset with masked images may contain a
 53    `MemberId("raw")` and `MemberId("mask")` image.
 54    """
 55
 56    members: Dict[MemberId, Tensor]
 57    """The sample's tensors"""
 58
 59    stat: Stat
 60    """Sample and dataset statistics"""
 61
 62    id: SampleId
 63    """Identifies the `Sample` within the dataset -- typically a number or a string."""
 64
 65    @property
 66    def shape(self) -> PerMember[PerAxis[int]]:
 67        return {tid: t.sizes for tid, t in self.members.items()}
 68
 69    def as_arrays(self) -> Dict[str, NDArray[Any]]:
 70        """Return sample as dictionary of arrays."""
 71        return {str(m): t.data.to_numpy() for m, t in self.members.items()}
 72
 73    def split_into_blocks(
 74        self,
 75        block_shapes: PerMember[PerAxis[int]],
 76        halo: PerMember[PerAxis[HaloLike]],
 77        pad_mode: PadMode,
 78        broadcast: bool = False,
 79    ) -> Tuple[TotalNumberOfBlocks, Iterable[SampleBlockWithOrigin]]:
 80        assert not (
 81            missing := [m for m in block_shapes if m not in self.members]
 82        ), f"`block_shapes` specified for unknown members: {missing}"
 83        assert not (
 84            missing := [m for m in halo if m not in block_shapes]
 85        ), f"`halo` specified for members without `block_shape`: {missing}"
 86
 87        n_blocks, blocks = split_multiple_shapes_into_blocks(
 88            shapes=self.shape,
 89            block_shapes=block_shapes,
 90            halo=halo,
 91            broadcast=broadcast,
 92        )
 93        return n_blocks, sample_block_generator(blocks, origin=self, pad_mode=pad_mode)
 94
 95    def as_single_block(self, halo: Optional[PerMember[PerAxis[Halo]]] = None):
 96        if halo is None:
 97            halo = {}
 98        return SampleBlockWithOrigin(
 99            sample_shape=self.shape,
100            sample_id=self.id,
101            blocks={
102                m: Block(
103                    sample_shape=self.shape[m],
104                    data=data,
105                    inner_slice={
106                        a: SliceInfo(0, s) for a, s in data.tagged_shape.items()
107                    },
108                    halo=halo.get(m, {}),
109                    block_index=0,
110                    blocks_in_sample=1,
111                )
112                for m, data in self.members.items()
113            },
114            stat=self.stat,
115            origin=self,
116            block_index=0,
117            blocks_in_sample=1,
118        )
119
120    @classmethod
121    def from_blocks(
122        cls,
123        sample_blocks: Iterable[SampleBlock],
124        *,
125        fill_value: float = float("nan"),
126    ) -> Self:
127        members: PerMember[Tensor] = {}
128        stat: Stat = {}
129        sample_id = None
130        for sample_block in sample_blocks:
131            assert sample_id is None or sample_id == sample_block.sample_id
132            sample_id = sample_block.sample_id
133            stat = sample_block.stat
134            for m, block in sample_block.blocks.items():
135                if m not in members:
136                    if -1 in block.sample_shape.values():
137                        raise NotImplementedError(
138                            "merging blocks with data dependent axis not yet implemented"
139                        )
140
141                    members[m] = Tensor(
142                        np.full(
143                            tuple(block.sample_shape[a] for a in block.data.dims),
144                            fill_value,
145                            dtype=block.data.dtype,
146                        ),
147                        dims=block.data.dims,
148                    )
149
150                members[m][block.inner_slice] = block.inner_data
151
152        return cls(members=members, stat=stat, id=sample_id)

A dataset sample.

A Sample has members, which allows combining multiple tensors into a single sample. For example, a Sample from a dataset with masked images may contain a MemberId("raw") and MemberId("mask") image.
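A sketch mirroring the "raw"/"mask" example from the docstring (shapes and dtypes are illustrative):

    import numpy as np

    from bioimageio.core import MemberId, Sample, Tensor

    raw = Tensor.from_numpy(np.random.rand(256, 256).astype("float32"), dims=("y", "x"))
    mask = Tensor.from_numpy(np.zeros((256, 256), dtype="uint8"), dims=("y", "x"))
    sample = Sample(
        members={MemberId("raw"): raw, MemberId("mask"): mask},
        stat={},
        id=0,
    )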

Sample( members: Dict[MemberId, Tensor], stat: Stat, id: SampleId)

members: Dict[MemberId, Tensor]

The sample's tensors

stat: Stat

Sample and dataset statistics

id: Hashable

Identifies the Sample within the dataset -- typically a number or a string.

shape: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[AxisId, int]]
65    @property
66    def shape(self) -> PerMember[PerAxis[int]]:
67        return {tid: t.sizes for tid, t in self.members.items()}
def as_arrays(self) -> Dict[str, numpy.ndarray[Any, numpy.dtype[Any]]]:
69    def as_arrays(self) -> Dict[str, NDArray[Any]]:
70        """Return sample as dictionary of arrays."""
71        return {str(m): t.data.to_numpy() for m, t in self.members.items()}

Return sample as dictionary of arrays.
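For the two-member sample sketched above, this yields plain numpy arrays keyed by the stringified member ids:

    arrays = sample.as_arrays()
    assert set(arrays) == {"raw", "mask"}  # values are numpy arrays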

def split_into_blocks( self, block_shapes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[AxisId, int]], halo: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[AxisId, Union[int, Tuple[int, int], bioimageio.core.common.Halo]]], pad_mode: Literal['edge', 'reflect', 'symmetric'], broadcast: bool = False) -> Tuple[int, Iterable[bioimageio.core.sample.SampleBlockWithOrigin]]:
73    def split_into_blocks(
74        self,
75        block_shapes: PerMember[PerAxis[int]],
76        halo: PerMember[PerAxis[HaloLike]],
77        pad_mode: PadMode,
78        broadcast: bool = False,
79    ) -> Tuple[TotalNumberOfBlocks, Iterable[SampleBlockWithOrigin]]:
80        assert not (
81            missing := [m for m in block_shapes if m not in self.members]
82        ), f"`block_shapes` specified for unknown members: {missing}"
83        assert not (
84            missing := [m for m in halo if m not in block_shapes]
85        ), f"`halo` specified for members without `block_shape`: {missing}"
86
87        n_blocks, blocks = split_multiple_shapes_into_blocks(
88            shapes=self.shape,
89            block_shapes=block_shapes,
90            halo=halo,
91            broadcast=broadcast,
92        )
93        return n_blocks, sample_block_generator(blocks, origin=self, pad_mode=pad_mode)
def as_single_block( self, halo: Optional[Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[AxisId, bioimageio.core.common.Halo]]] = None):
 95    def as_single_block(self, halo: Optional[PerMember[PerAxis[Halo]]] = None):
 96        if halo is None:
 97            halo = {}
 98        return SampleBlockWithOrigin(
 99            sample_shape=self.shape,
100            sample_id=self.id,
101            blocks={
102                m: Block(
103                    sample_shape=self.shape[m],
104                    data=data,
105                    inner_slice={
106                        a: SliceInfo(0, s) for a, s in data.tagged_shape.items()
107                    },
108                    halo=halo.get(m, {}),
109                    block_index=0,
110                    blocks_in_sample=1,
111                )
112                for m, data in self.members.items()
113            },
114            stat=self.stat,
115            origin=self,
116            block_index=0,
117            blocks_in_sample=1,
118        )
@classmethod
def from_blocks( cls, sample_blocks: Iterable[bioimageio.core.sample.SampleBlock], *, fill_value: float = nan) -> Self:
120    @classmethod
121    def from_blocks(
122        cls,
123        sample_blocks: Iterable[SampleBlock],
124        *,
125        fill_value: float = float("nan"),
126    ) -> Self:
127        members: PerMember[Tensor] = {}
128        stat: Stat = {}
129        sample_id = None
130        for sample_block in sample_blocks:
131            assert sample_id is None or sample_id == sample_block.sample_id
132            sample_id = sample_block.sample_id
133            stat = sample_block.stat
134            for m, block in sample_block.blocks.items():
135                if m not in members:
136                    if -1 in block.sample_shape.values():
137                        raise NotImplementedError(
138                            "merging blocks with data dependent axis not yet implemented"
139                        )
140
141                    members[m] = Tensor(
142                        np.full(
143                            tuple(block.sample_shape[a] for a in block.data.dims),
144                            fill_value,
145                            dtype=block.data.dtype,
146                        ),
147                        dims=block.data.dims,
148                    )
149
150                members[m][block.inner_slice] = block.inner_data
151
152        return cls(members=members, stat=stat, id=sample_id)
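A round-trip sketch over the two-member sample from above (no halo; block sizes are illustrative):

    from bioimageio.core import AxisId, Sample

    block_shapes = {m: {AxisId("y"): 128, AxisId("x"): 128} for m in sample.members}
    n_blocks, blocks = sample.split_into_blocks(block_shapes, halo={}, pad_mode="reflect")
    reassembled = Sample.from_blocks(blocks)  # same member shapes as `sample`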
def save_bioimageio_package_as_folder( source: Union[BioimageioYamlSource, ResourceDescr], /, *, output_path: Union[NewPath, DirectoryPath, None] = None, weights_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None) -> DirectoryPath:
150def save_bioimageio_package_as_folder(
151    source: Union[BioimageioYamlSource, ResourceDescr],
152    /,
153    *,
154    output_path: Union[NewPath, DirectoryPath, None] = None,
155    weights_priority_order: Optional[  # model only
156        Sequence[
157            Literal[
158                "keras_hdf5",
159                "onnx",
160                "pytorch_state_dict",
161                "tensorflow_js",
162                "tensorflow_saved_model_bundle",
163                "torchscript",
164            ]
165        ]
166    ] = None,
167) -> DirectoryPath:
168    """Write the content of a bioimage.io resource package to a folder.
169
170    Args:
171        source: bioimageio resource description
172        output_path: directory path to write the package to
173        weights_priority_order: If given, only the first weights format present in the model is included.
174                                If none of the prioritized weights formats is found, all are included.
175
176    Returns:
177        directory path to bioimageio package folder
178    """
179    package_content = _prepare_resource_package(
180        source,
181        weights_priority_order=weights_priority_order,
182    )
183    if output_path is None:
184        output_path = Path(mkdtemp())
185    else:
186        output_path = Path(output_path)
187
188    output_path.mkdir(exist_ok=True, parents=True)
189    for name, src in package_content.items():
190        if isinstance(src, collections.abc.Mapping):
191            write_yaml(src, output_path / name)
192        elif (
193            isinstance(src.original_root, Path)
194            and src.original_root / src.original_file_name
195            == (output_path / name).resolve()
196        ):
197            logger.debug(
198                f"Not copying {src.original_root / src.original_file_name} to itself."
199            )
200        else:
201            if isinstance(src.original_root, Path):
202                logger.debug(
203                    f"Copying from path {src.original_root / src.original_file_name} to {output_path / name}."
204                )
205            else:
206                logger.debug(
207                    f"Copying {src.original_root}/{src.original_file_name} to {output_path / name}."
208                )
209            with (output_path / name).open("wb") as dest:
210                _ = shutil.copyfileobj(src, dest)
211
212    return output_path

Write the content of a bioimage.io resource package to a folder.

Arguments:
  • source: bioimageio resource description
  • output_path: directory path to write the package to
  • weights_priority_order: If given, only the first weights format present in the model is included. If none of the prioritized weights formats is found, all are included.
Returns:

directory path to bioimageio package folder
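A usage sketch ("affable-shark" is a placeholder source; any BioimageioYamlSource or ResourceDescr works):

    from pathlib import Path

    from bioimageio.core import save_bioimageio_package_as_folder

    folder = save_bioimageio_package_as_folder(
        "affable-shark",
        output_path=Path("unpacked-model"),
        weights_priority_order=["torchscript"],  # keep only these weights if present
    )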

def save_bioimageio_package( source: Union[BioimageioYamlSource, ResourceDescr], /, *, compression: int = ZIP_DEFLATED, compression_level: int = 1, output_path: Union[NewPath, FilePath, None] = None, weights_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None, allow_invalid: bool = False) -> FilePath:
215def save_bioimageio_package(
216    source: Union[BioimageioYamlSource, ResourceDescr],
217    /,
218    *,
219    compression: int = ZIP_DEFLATED,
220    compression_level: int = 1,
221    output_path: Union[NewPath, FilePath, None] = None,
222    weights_priority_order: Optional[  # model only
223        Sequence[
224            Literal[
225                "keras_hdf5",
226                "onnx",
227                "pytorch_state_dict",
228                "tensorflow_js",
229                "tensorflow_saved_model_bundle",
230                "torchscript",
231            ]
232        ]
233    ] = None,
234    allow_invalid: bool = False,
235) -> FilePath:
236    """Package a bioimageio resource as a zip file.
237
238    Args:
239        source: bioimageio resource description
240        compression: The numeric constant of the compression method.
241        compression_level: Compression level to use when writing files to the archive.
242                           See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile
243        output_path: file path to write package to
244        weights_priority_order: If given, only the first weights format present in the model is included.
245                                If none of the prioritized weights formats is found, all are included.
246
247    Returns:
248        path to zipped bioimageio package
249    """
250    package_content = _prepare_resource_package(
251        source,
252        weights_priority_order=weights_priority_order,
253    )
254    if output_path is None:
255        output_path = Path(
256            NamedTemporaryFile(suffix=".bioimageio.zip", delete=False).name
257        )
258    else:
259        output_path = Path(output_path)
260
261    write_zip(
262        output_path,
263        package_content,
264        compression=compression,
265        compression_level=compression_level,
266    )
267    with get_validation_context().replace(warning_level=ERROR):
268        if isinstance((exported := load_description(output_path)), InvalidDescr):
269            exported.validation_summary.display()
270            msg = f"Exported package at '{output_path}' is invalid."
271            if allow_invalid:
272                logger.error(msg)
273            else:
274                raise ValueError(msg)
275
276    return output_path

Package a bioimageio resource as a zip file.

Arguments:
  • source: bioimageio resource description
  • compression: The numeric constant of the compression method.
  • compression_level: Compression level to use when writing files to the archive. See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile
  • output_path: file path to write package to
  • weights_priority_order: If given, only the first weights format present in the model is included. If none of the prioritized weights formats is found, all are included.
Returns:

path to zipped bioimageio package
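A usage sketch with a placeholder source:

    from pathlib import Path

    from bioimageio.core import save_bioimageio_package

    zip_path = save_bioimageio_package(
        "affable-shark",
        output_path=Path("model.bioimageio.zip"),
    )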

def save_bioimageio_yaml_only( rd: Union[ResourceDescr, BioimageioYamlContent, InvalidDescr], /, file: Union[NewPath, FilePath, TextIO], *, exclude_unset: bool = True, exclude_defaults: bool = False):
202def save_bioimageio_yaml_only(
203    rd: Union[ResourceDescr, BioimageioYamlContent, InvalidDescr],
204    /,
205    file: Union[NewPath, FilePath, TextIO],
206    *,
207    exclude_unset: bool = True,
208    exclude_defaults: bool = False,
209):
210    """write the metadata of a resource description (`rd`) to `file`
211    without writing any of the referenced files in it.
212
213    Args:
214        rd: bioimageio resource description
215        file: file or stream to save to
216        exclude_unset: Exclude fields that have not explicitly been set.
217        exclude_defaults: Exclude fields that have the default value (even if set explicitly).
218
219    Note: To save a resource description with its associated files as a package,
220    use `save_bioimageio_package` or `save_bioimageio_package_as_folder`.
221    """
222    if isinstance(rd, ResourceDescrBase):
223        content = dump_description(
224            rd, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults
225        )
226    else:
227        content = rd
228
229    write_yaml(cast(YamlValue, content), file)

write the metadata of a resource description (rd) to file without writing any of the referenced files in it.

Arguments:
  • rd: bioimageio resource description
  • file: file or stream to save to
  • exclude_unset: Exclude fields that have not explicitly been set.
  • exclude_defaults: Exclude fields that have the default value (even if set explicitly).

Note: To save a resource description with its associated files as a package, use save_bioimageio_package or save_bioimageio_package_as_folder.
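A usage sketch with a placeholder resource id:

    from pathlib import Path

    from bioimageio.core import load_description, save_bioimageio_yaml_only

    rd = load_description("affable-shark")
    save_bioimageio_yaml_only(rd, Path("rdf.bioimageio.yaml"))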

settings = Settings(allow_pickle=False, cache_path=PosixPath('/home/runner/.cache/bioimageio'), collection_http_pattern='https://hypha.aicell.io/bioimage-io/artifacts/{bioimageio_id}/files/rdf.yaml', hypha_upload='https://hypha.aicell.io/public/services/artifact-manager/create', hypha_upload_token=None, id_map='https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/id_map.json', id_map_draft='https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/id_map_draft.json', perform_io_checks=True, resolve_draft=True, log_warnings=True, github_username=None, github_token=None, CI='true', user_agent=None, keras_backend='torch')
Stat = Dict[Measure, MeasureValue] (keys are sample or dataset measures, values are floats or Tensors)
class Tensor(bioimageio.core._magic_tensor_ops.MagicTensorOpsMixin):
 50class Tensor(MagicTensorOpsMixin):
 51    """A wrapper around an xr.DataArray for better integration with bioimageio.spec
 52    and improved type annotations."""
 53
 54    _Compatible = Union["Tensor", xr.DataArray, _ScalarOrArray]
 55
 56    def __init__(
 57        self,
 58        array: NDArray[Any],
 59        dims: Sequence[Union[AxisId, AxisLike]],
 60    ) -> None:
 61        super().__init__()
 62        axes = tuple(
 63            a if isinstance(a, AxisId) else AxisInfo.create(a).id for a in dims
 64        )
 65        self._data = xr.DataArray(array, dims=axes)
 66
 67    def __array__(self, dtype: DTypeLike = None):
 68        return np.asarray(self._data, dtype=dtype)
 69
 70    def __getitem__(
 71        self,
 72        key: Union[
 73            SliceInfo,
 74            slice,
 75            int,
 76            PerAxis[Union[SliceInfo, slice, int]],
 77            Tensor,
 78            xr.DataArray,
 79        ],
 80    ) -> Self:
 81        if isinstance(key, SliceInfo):
 82            key = slice(*key)
 83        elif isinstance(key, collections.abc.Mapping):
 84            key = {
 85                a: s if isinstance(s, int) else s if isinstance(s, slice) else slice(*s)
 86                for a, s in key.items()
 87            }
 88        elif isinstance(key, Tensor):
 89            key = key._data
 90
 91        return self.__class__.from_xarray(self._data[key])
 92
 93    def __setitem__(
 94        self,
 95        key: Union[PerAxis[Union[SliceInfo, slice]], Tensor, xr.DataArray],
 96        value: Union[Tensor, xr.DataArray, float, int],
 97    ) -> None:
 98        if isinstance(key, Tensor):
 99            key = key._data
100        elif isinstance(key, xr.DataArray):
101            pass
102        else:
103            key = {a: s if isinstance(s, slice) else slice(*s) for a, s in key.items()}
104
105        if isinstance(value, Tensor):
106            value = value._data
107
108        self._data[key] = value
109
110    def __len__(self) -> int:
111        return len(self.data)
112
113    def _iter(self: Any) -> Iterator[Any]:
114        for n in range(len(self)):
115            yield self[n]
116
117    def __iter__(self: Any) -> Iterator[Any]:
118        if self.ndim == 0:
119            raise TypeError("iteration over a 0-d array")
120        return self._iter()
121
122    def _binary_op(
123        self,
124        other: _Compatible,
125        f: Callable[[Any, Any], Any],
126        reflexive: bool = False,
127    ) -> Self:
128        data = self._data._binary_op(  # pyright: ignore[reportPrivateUsage]
129            (other._data if isinstance(other, Tensor) else other),
130            f,
131            reflexive,
132        )
133        return self.__class__.from_xarray(data)
134
135    def _inplace_binary_op(
136        self,
137        other: _Compatible,
138        f: Callable[[Any, Any], Any],
139    ) -> Self:
140        _ = self._data._inplace_binary_op(  # pyright: ignore[reportPrivateUsage]
141            (
142                other_d
143                if (other_d := getattr(other, "data")) is not None
144                and isinstance(
145                    other_d,
146                    xr.DataArray,
147                )
148                else other
149            ),
150            f,
151        )
152        return self
153
154    def _unary_op(self, f: Callable[[Any], Any], *args: Any, **kwargs: Any) -> Self:
155        data = self._data._unary_op(  # pyright: ignore[reportPrivateUsage]
156            f, *args, **kwargs
157        )
158        return self.__class__.from_xarray(data)
159
160    @classmethod
161    def from_xarray(cls, data_array: xr.DataArray) -> Self:
162        """create a `Tensor` from an xarray data array
163
164        note for internal use: this factory method is round-trip safe
165            for any `Tensor`'s `data` property (an xarray.DataArray).
166        """
167        return cls(
168            array=data_array.data, dims=tuple(AxisId(d) for d in data_array.dims)
169        )
170
171    @classmethod
172    def from_numpy(
173        cls,
174        array: NDArray[Any],
175        *,
176        dims: Optional[Union[AxisLike, Sequence[AxisLike]]],
177    ) -> Tensor:
178        """create a `Tensor` from a numpy array
179
180        Args:
181            array: the nd numpy array
182            dims: A description of the array's axes;
183                if None, axes are guessed (which might fail and raise a ValueError).
184
185        Raises:
186            ValueError: if `dims` is None and axes guessing fails.
187        """
188
189        if dims is None:
190            return cls._interprete_array_wo_known_axes(array)
191        elif isinstance(dims, (str, Axis, v0_5.AxisBase)):
192            dims = [dims]
193
194        axis_infos = [AxisInfo.create(a) for a in dims]
195        original_shape = tuple(array.shape)
196
197        successful_view = _get_array_view(array, axis_infos)
198        if successful_view is None:
199            raise ValueError(
200                f"Array shape {original_shape} does not map to axes {dims}"
201            )
202
203        return Tensor(successful_view, dims=tuple(a.id for a in axis_infos))
204
205    @property
206    def data(self):
207        return self._data
208
209    @property
210    def dims(self):  # TODO: rename to `axes`?
211        """Tuple of dimension names associated with this tensor."""
212        return cast(Tuple[AxisId, ...], self._data.dims)
213
214    @property
215    def dtype(self) -> DTypeStr:
216        dt = str(self.data.dtype)  # pyright: ignore[reportUnknownArgumentType]
217        assert dt in get_args(DTypeStr)
218        return dt  # pyright: ignore[reportReturnType]
219
220    @property
221    def ndim(self):
222        """Number of tensor dimensions."""
223        return self._data.ndim
224
225    @property
226    def shape(self):
227        """Tuple of tensor axes lengths"""
228        return self._data.shape
229
230    @property
231    def shape_tuple(self):
232        """Tuple of tensor axes lengths"""
233        return self._data.shape
234
235    @property
236    def size(self):
237        """Number of elements in the tensor.
238
239        Equal to math.prod(tensor.shape), i.e., the product of the tensor's axis lengths.
240        """
241        return self._data.size
242
243    @property
244    def sizes(self):
245        """Ordered, immutable mapping from axis ids to axis lengths."""
246        return cast(Mapping[AxisId, int], self.data.sizes)
247
248    @property
249    def tagged_shape(self):
250        """(alias for `sizes`) Ordered, immutable mapping from axis ids to lengths."""
251        return self.sizes
252
253    def argmax(self) -> Mapping[AxisId, int]:
254        ret = self._data.argmax(...)
255        assert isinstance(ret, dict)
256        return {cast(AxisId, k): cast(int, v.item()) for k, v in ret.items()}
257
258    def astype(self, dtype: DTypeStr, *, copy: bool = False):
259        """Return tensor cast to `dtype`
260
261        note: if dtype is already satisfied, a copy is made only if `copy` is True"""
262        return self.__class__.from_xarray(self._data.astype(dtype, copy=copy))
263
264    def clip(self, min: Optional[float] = None, max: Optional[float] = None):
265        """Return a tensor whose values are limited to [min, max].
266        At least one of max or min must be given."""
267        return self.__class__.from_xarray(self._data.clip(min, max))
268
269    def crop_to(
270        self,
271        sizes: PerAxis[int],
272        crop_where: Union[
273            CropWhere,
274            PerAxis[CropWhere],
275        ] = "left_and_right",
276    ) -> Self:
277        """crop to match `sizes`"""
278        if isinstance(crop_where, str):
279            crop_axis_where: PerAxis[CropWhere] = {a: crop_where for a in self.dims}
280        else:
281            crop_axis_where = crop_where
282
283        slices: Dict[AxisId, SliceInfo] = {}
284
285        for a, s_is in self.sizes.items():
286            if a not in sizes or sizes[a] == s_is:
287                pass
288            elif sizes[a] > s_is:
289                logger.warning(
290                    "Cannot crop axis {} of size {} to larger size {}",
291                    a,
292                    s_is,
293                    sizes[a],
294                )
295            elif a not in crop_axis_where:
296                raise ValueError(
297                    f"Don't know where to crop axis {a}, `crop_where`={crop_where}"
298                )
299            else:
300                crop_this_axis_where = crop_axis_where[a]
301                if crop_this_axis_where == "left":
302                    slices[a] = SliceInfo(s_is - sizes[a], s_is)
303                elif crop_this_axis_where == "right":
304                    slices[a] = SliceInfo(0, sizes[a])
305                elif crop_this_axis_where == "left_and_right":
306                    slices[a] = SliceInfo(
307                        start := (s_is - sizes[a]) // 2, sizes[a] + start
308                    )
309                else:
310                    assert_never(crop_this_axis_where)
311
312        return self[slices]
313
314    def expand_dims(self, dims: Union[Sequence[AxisId], PerAxis[int]]) -> Self:
315        return self.__class__.from_xarray(self._data.expand_dims(dims=dims))
316
317    def item(
318        self,
319        key: Union[
320            None, SliceInfo, slice, int, PerAxis[Union[SliceInfo, slice, int]]
321        ] = None,
322    ):
323        """Copy a tensor element to a standard Python scalar and return it."""
324        if key is None:
325            ret = self._data.item()
326        else:
327            ret = self[key]._data.item()
328
329        assert isinstance(ret, (bool, float, int))
330        return ret
331
332    def mean(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
333        return self.__class__.from_xarray(self._data.mean(dim=dim))
334
335    def pad(
336        self,
337        pad_width: PerAxis[PadWidthLike],
338        mode: PadMode = "symmetric",
339    ) -> Self:
340        pad_width = {a: PadWidth.create(p) for a, p in pad_width.items()}
341        return self.__class__.from_xarray(
342            self._data.pad(pad_width=pad_width, mode=mode)
343        )
344
345    def pad_to(
346        self,
347        sizes: PerAxis[int],
348        pad_where: Union[PadWhere, PerAxis[PadWhere]] = "left_and_right",
349        mode: PadMode = "symmetric",
350    ) -> Self:
351        """pad `tensor` to match `sizes`"""
352        if isinstance(pad_where, str):
353            pad_axis_where: PerAxis[PadWhere] = {a: pad_where for a in self.dims}
354        else:
355            pad_axis_where = pad_where
356
357        pad_width: Dict[AxisId, PadWidth] = {}
358        for a, s_is in self.sizes.items():
359            if a not in sizes or sizes[a] == s_is:
360                pad_width[a] = PadWidth(0, 0)
361            elif s_is > sizes[a]:
362                pad_width[a] = PadWidth(0, 0)
363                logger.warning(
364                    "Cannot pad axis {} of size {} to smaller size {}",
365                    a,
366                    s_is,
367                    sizes[a],
368                )
369            elif a not in pad_axis_where:
370                raise ValueError(
371                    f"Don't know where to pad axis {a}, `pad_where`={pad_where}"
372                )
373            else:
374                pad_this_axis_where = pad_axis_where[a]
375                d = sizes[a] - s_is
376                if pad_this_axis_where == "left":
377                    pad_width[a] = PadWidth(d, 0)
378                elif pad_this_axis_where == "right":
379                    pad_width[a] = PadWidth(0, d)
380                elif pad_this_axis_where == "left_and_right":
381                    pad_width[a] = PadWidth(left := d // 2, d - left)
382                else:
383                    assert_never(pad_this_axis_where)
384
385        return self.pad(pad_width, mode)
386
387    def quantile(
388        self,
389        q: Union[float, Sequence[float]],
390        dim: Optional[Union[AxisId, Sequence[AxisId]]] = None,
391    ) -> Self:
392        assert (
393            isinstance(q, (float, int))
394            and q >= 0.0
395            or not isinstance(q, (float, int))
396            and all(qq >= 0.0 for qq in q)
397        )
398        assert (
399            isinstance(q, (float, int))
400            and q <= 1.0
401            or not isinstance(q, (float, int))
402            and all(qq <= 1.0 for qq in q)
403        )
404        assert dim is None or (
405            (quantile_dim := AxisId("quantile")) != dim and quantile_dim not in set(dim)
406        )
407        return self.__class__.from_xarray(self._data.quantile(q, dim=dim))
408
409    def resize_to(
410        self,
411        sizes: PerAxis[int],
412        *,
413        pad_where: Union[
414            PadWhere,
415            PerAxis[PadWhere],
416        ] = "left_and_right",
417        crop_where: Union[
418            CropWhere,
419            PerAxis[CropWhere],
420        ] = "left_and_right",
421        pad_mode: PadMode = "symmetric",
422    ):
423        """return cropped/padded tensor with `sizes`"""
424        crop_to_sizes: Dict[AxisId, int] = {}
425        pad_to_sizes: Dict[AxisId, int] = {}
426        new_axes = dict(sizes)
427        for a, s_is in self.sizes.items():
428            a = AxisId(str(a))
429            _ = new_axes.pop(a, None)
430            if a not in sizes or sizes[a] == s_is:
431                pass
432            elif s_is > sizes[a]:
433                crop_to_sizes[a] = sizes[a]
434            else:
435                pad_to_sizes[a] = sizes[a]
436
437        tensor = self
438        if crop_to_sizes:
439            tensor = tensor.crop_to(crop_to_sizes, crop_where=crop_where)
440
441        if pad_to_sizes:
442            tensor = tensor.pad_to(pad_to_sizes, pad_where=pad_where, mode=pad_mode)
443
444        if new_axes:
445            tensor = tensor.expand_dims(new_axes)
446
447        return tensor
448
449    def std(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
450        return self.__class__.from_xarray(self._data.std(dim=dim))
451
452    def sum(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
453        """Reduce this Tensor's data by applying sum along some dimension(s)."""
454        return self.__class__.from_xarray(self._data.sum(dim=dim))
455
456    def transpose(
457        self,
458        axes: Sequence[AxisId],
459    ) -> Self:
460        """return a transposed tensor
461
462        Args:
463            axes: the desired tensor axes
464        """
465        # expand missing tensor axes
466        missing_axes = tuple(a for a in axes if a not in self.dims)
467        array = self._data
468        if missing_axes:
469            array = array.expand_dims(missing_axes)
470
471        # transpose to the correct axis order
472        return self.__class__.from_xarray(array.transpose(*axes))
473
474    def var(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
475        return self.__class__.from_xarray(self._data.var(dim=dim))
476
477    @classmethod
478    def _interprete_array_wo_known_axes(cls, array: NDArray[Any]):
479        ndim = array.ndim
480        if ndim == 2:
481            current_axes = (
482                v0_5.SpaceInputAxis(id=AxisId("y"), size=array.shape[0]),
483                v0_5.SpaceInputAxis(id=AxisId("x"), size=array.shape[1]),
484            )
485        elif ndim == 3 and any(s <= 3 for s in array.shape):
486            current_axes = (
487                v0_5.ChannelAxis(
488                    channel_names=[
489                        v0_5.Identifier(f"channel{i}") for i in range(array.shape[0])
490                    ]
491                ),
492                v0_5.SpaceInputAxis(id=AxisId("y"), size=array.shape[1]),
493                v0_5.SpaceInputAxis(id=AxisId("x"), size=array.shape[2]),
494            )
495        elif ndim == 3:
496            current_axes = (
497                v0_5.SpaceInputAxis(id=AxisId("z"), size=array.shape[0]),
498                v0_5.SpaceInputAxis(id=AxisId("y"), size=array.shape[1]),
499                v0_5.SpaceInputAxis(id=AxisId("x"), size=array.shape[2]),
500            )
501        elif ndim == 4:
502            current_axes = (
503                v0_5.ChannelAxis(
504                    channel_names=[
505                        v0_5.Identifier(f"channel{i}") for i in range(array.shape[0])
506                    ]
507                ),
508                v0_5.SpaceInputAxis(id=AxisId("z"), size=array.shape[1]),
509                v0_5.SpaceInputAxis(id=AxisId("y"), size=array.shape[2]),
510                v0_5.SpaceInputAxis(id=AxisId("x"), size=array.shape[3]),
511            )
512        elif ndim == 5:
513            current_axes = (
514                v0_5.BatchAxis(),
515                v0_5.ChannelAxis(
516                    channel_names=[
517                        v0_5.Identifier(f"channel{i}") for i in range(array.shape[1])
518                    ]
519                ),
520                v0_5.SpaceInputAxis(id=AxisId("z"), size=array.shape[2]),
521                v0_5.SpaceInputAxis(id=AxisId("y"), size=array.shape[3]),
522                v0_5.SpaceInputAxis(id=AxisId("x"), size=array.shape[4]),
523            )
524        else:
525            raise ValueError(f"Could not guess an axis mapping for {array.shape}")
526
527        return cls(array, dims=tuple(a.id for a in current_axes))

A wrapper around an xr.DataArray for better integration with bioimageio.spec and improved type annotations.
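A small sketch of construction and the numpy-style arithmetic provided by MagicTensorOpsMixin:

    import numpy as np

    from bioimageio.core import AxisId, Tensor

    t = Tensor(np.ones((2, 3), dtype="float32"), dims=(AxisId("y"), AxisId("x")))
    normalized = (t - t.mean()) / (t.std() + 1e-6)
    print(t.sizes)  # mapping {'y': 2, 'x': 3}
    print(t.dtype)  # 'float32'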

Tensor( array: numpy.ndarray[typing.Any, numpy.dtype[typing.Any]], dims: Sequence[Union[AxisId, AxisLike]])
56    def __init__(
57        self,
58        array: NDArray[Any],
59        dims: Sequence[Union[AxisId, AxisLike]],
60    ) -> None:
61        super().__init__()
62        axes = tuple(
63            a if isinstance(a, AxisId) else AxisInfo.create(a).id for a in dims
64        )
65        self._data = xr.DataArray(array, dims=axes)
@classmethod
def from_xarray(cls, data_array: xarray.core.dataarray.DataArray) -> Self:
160    @classmethod
161    def from_xarray(cls, data_array: xr.DataArray) -> Self:
162        """create a `Tensor` from an xarray data array
163
164        note for internal use: this factory method is round-trip safe
165            for any `Tensor`'s `data` property (an xarray.DataArray).
166        """
167        return cls(
168            array=data_array.data, dims=tuple(AxisId(d) for d in data_array.dims)
169        )

create a Tensor from an xarray data array

note for internal use: this factory method is round-trip safe for any Tensor's data property (an xarray.DataArray).

@classmethod
def from_numpy( cls, array: numpy.ndarray[typing.Any, numpy.dtype[typing.Any]], *, dims: Optional[Union[AxisLike, Sequence[AxisLike]]]) -> Tensor:
171    @classmethod
172    def from_numpy(
173        cls,
174        array: NDArray[Any],
175        *,
176        dims: Optional[Union[AxisLike, Sequence[AxisLike]]],
177    ) -> Tensor:
178        """create a `Tensor` from a numpy array
179
180        Args:
181            array: the nd numpy array
182            dims: A description of the array's axes,
183                if None axes are guessed (which might fail and raise a ValueError).
184
185        Raises:
186            ValueError: if `dims` is None and axes guessing fails.
187        """
188
189        if dims is None:
190            return cls._interprete_array_wo_known_axes(array)
191        elif isinstance(dims, (str, Axis, v0_5.AxisBase)):
192            dims = [dims]
193
194        axis_infos = [AxisInfo.create(a) for a in dims]
195        original_shape = tuple(array.shape)
196
197        successful_view = _get_array_view(array, axis_infos)
198        if successful_view is None:
199            raise ValueError(
200                f"Array shape {original_shape} does not map to axes {dims}"
201            )
202
203        return Tensor(successful_view, dims=tuple(a.id for a in axis_infos))

create a Tensor from a numpy array

Arguments:
  • array: the nd numpy array
  • dims: A description of the array's axes; if None, axes are guessed (which might fail and raise a ValueError).
Raises:
  • ValueError: if dims is None and axes guessing fails.
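
A short sketch of both modes, using the single-letter axis ids accepted as AxisLike:

import numpy as np

from bioimageio.core import Tensor

arr = np.zeros((1, 3, 256, 256), dtype="float32")

# explicit axes: batch, channel, y, x
t = Tensor.from_numpy(arr, dims=("b", "c", "y", "x"))
print(dict(t.sizes))

# dims=None triggers axis guessing, which may raise a ValueError
t_guessed = Tensor.from_numpy(arr, dims=None)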
data
205    @property
206    def data(self):
207        return self._data
dims
209    @property
210    def dims(self):  # TODO: rename to `axes`?
211        """Tuple of dimension names associated with this tensor."""
212        return cast(Tuple[AxisId, ...], self._data.dims)

Tuple of dimension names associated with this tensor.

dtype: Literal['bool', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64']
214    @property
215    def dtype(self) -> DTypeStr:
216        dt = str(self.data.dtype)  # pyright: ignore[reportUnknownArgumentType]
217        assert dt in get_args(DTypeStr)
218        return dt  # pyright: ignore[reportReturnType]
ndim
220    @property
221    def ndim(self):
222        """Number of tensor dimensions."""
223        return self._data.ndim

Number of tensor dimensions.

shape
225    @property
226    def shape(self):
227        """Tuple of tensor axes lengths"""
228        return self._data.shape

Tuple of tensor axes lengths

shape_tuple
230    @property
231    def shape_tuple(self):
232        """Tuple of tensor axes lengths"""
233        return self._data.shape

Tuple of tensor axes lengths

size
235    @property
236    def size(self):
237        """Number of elements in the tensor.
238
239        Equal to math.prod(tensor.shape), i.e., the product of the tensor's dimensions.
240        """
241        return self._data.size

Number of elements in the tensor.

Equal to math.prod(tensor.shape), i.e., the product of the tensor's dimensions.

sizes
243    @property
244    def sizes(self):
245        """Ordered, immutable mapping from axis ids to axis lengths."""
246        return cast(Mapping[AxisId, int], self.data.sizes)

Ordered, immutable mapping from axis ids to axis lengths.

tagged_shape
248    @property
249    def tagged_shape(self):
250        """(alias for `sizes`) Ordered, immutable mapping from axis ids to lengths."""
251        return self.sizes

(alias for sizes) Ordered, immutable mapping from axis ids to lengths.
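
For example, for a hypothetical 2-D tensor:

import numpy as np

from bioimageio.core import Tensor

t = Tensor.from_numpy(np.zeros((4, 5)), dims=("y", "x"))
print(t.shape)               # (4, 5)
print(dict(t.sizes))         # mapping from axis ids to lengths
print(dict(t.tagged_shape))  # identical mapping; tagged_shape aliases sizes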

def argmax(self) -> Mapping[AxisId, int]:
253    def argmax(self) -> Mapping[AxisId, int]:
254        ret = self._data.argmax(...)
255        assert isinstance(ret, dict)
256        return {cast(AxisId, k): cast(int, v.item()) for k, v in ret.items()}
def astype( self, dtype: Literal['bool', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'], *, copy: bool = False):
258    def astype(self, dtype: DTypeStr, *, copy: bool = False):
259        """Return tensor cast to `dtype`
260
261        note: if `dtype` is already satisfied, data is only copied if `copy` is True"""
262        return self.__class__.from_xarray(self._data.astype(dtype, copy=copy))

Return tensor cast to dtype

note: if dtype is already satisfied, data is only copied if copy is True

def clip(self, min: Optional[float] = None, max: Optional[float] = None):
264    def clip(self, min: Optional[float] = None, max: Optional[float] = None):
265        """Return a tensor whose values are limited to [min, max].
266        At least one of max or min must be given."""
267        return self.__class__.from_xarray(self._data.clip(min, max))

Return a tensor whose values are limited to [min, max]. At least one of max or min must be given.
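
A small sketch combining astype and clip (values and shapes are illustrative):

import numpy as np

from bioimageio.core import Tensor

t = Tensor.from_numpy(np.linspace(-1.0, 2.0, 6).reshape(2, 3), dims=("y", "x"))

t32 = t.astype("float32")                  # no copy if dtype already matches
t32_copy = t.astype("float32", copy=True)  # force a copy either way
clipped = t.clip(0.0, 1.0)                 # values limited to [0, 1]
print(clipped.data.values)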

def crop_to( self, sizes: Mapping[AxisId, int], crop_where: Union[Literal['left', 'right', 'left_and_right'], Mapping[AxisId, Literal['left', 'right', 'left_and_right']]] = 'left_and_right') -> Self:
269    def crop_to(
270        self,
271        sizes: PerAxis[int],
272        crop_where: Union[
273            CropWhere,
274            PerAxis[CropWhere],
275        ] = "left_and_right",
276    ) -> Self:
277        """crop to match `sizes`"""
278        if isinstance(crop_where, str):
279            crop_axis_where: PerAxis[CropWhere] = {a: crop_where for a in self.dims}
280        else:
281            crop_axis_where = crop_where
282
283        slices: Dict[AxisId, SliceInfo] = {}
284
285        for a, s_is in self.sizes.items():
286            if a not in sizes or sizes[a] == s_is:
287                pass
288            elif sizes[a] > s_is:
289                logger.warning(
290                    "Cannot crop axis {} of size {} to larger size {}",
291                    a,
292                    s_is,
293                    sizes[a],
294                )
295            elif a not in crop_axis_where:
296                raise ValueError(
297                    f"Don't know where to crop axis {a}, `crop_where`={crop_where}"
298                )
299            else:
300                crop_this_axis_where = crop_axis_where[a]
301                if crop_this_axis_where == "left":
302                    slices[a] = SliceInfo(s_is - sizes[a], s_is)
303                elif crop_this_axis_where == "right":
304                    slices[a] = SliceInfo(0, sizes[a])
305                elif crop_this_axis_where == "left_and_right":
306                    slices[a] = SliceInfo(
307                        start := (s_is - sizes[a]) // 2, sizes[a] + start
308                    )
309                else:
310                    assert_never(crop_this_axis_where)
311
312        return self[slices]

crop to match sizes
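
A sketch with a hypothetical 'bcyx' tensor:

import numpy as np

from bioimageio.core import AxisId, Tensor

t = Tensor.from_numpy(np.zeros((1, 3, 256, 256)), dims=("b", "c", "y", "x"))

# crop the spatial axes down to 128x128, removing pixels from both sides
cropped = t.crop_to({AxisId("y"): 128, AxisId("x"): 128})
print(dict(cropped.sizes))

# remove from the left of axis 'x' only (keeping its right end)
left_cropped = t.crop_to({AxisId("x"): 128}, crop_where={AxisId("x"): "left"})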

def expand_dims( self, dims: Union[Sequence[AxisId], Mapping[AxisId, int]]) -> Self:
314    def expand_dims(self, dims: Union[Sequence[AxisId], PerAxis[int]]) -> Self:
315        return self.__class__.from_xarray(self._data.expand_dims(dims=dims))
def item( self, key: Union[NoneType, bioimageio.core.common.SliceInfo, slice, int, Mapping[AxisId, Union[bioimageio.core.common.SliceInfo, slice, int]]] = None):
317    def item(
318        self,
319        key: Union[
320            None, SliceInfo, slice, int, PerAxis[Union[SliceInfo, slice, int]]
321        ] = None,
322    ):
323        """Copy a tensor element to a standard Python scalar and return it."""
324        if key is None:
325            ret = self._data.item()
326        else:
327            ret = self[key]._data.item()
328
329        assert isinstance(ret, (bool, float, int))
330        return ret

Copy a tensor element to a standard Python scalar and return it.
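
For example, addressing a single element by indexing every axis:

import numpy as np

from bioimageio.core import AxisId, Tensor

t = Tensor.from_numpy(np.arange(6).reshape(2, 3), dims=("y", "x"))

value = t.item({AxisId("y"): 1, AxisId("x"): 2})
print(value)  # 5, as a plain Python int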

def mean( self, dim: Union[AxisId, Sequence[AxisId], NoneType] = None) -> Self:
332    def mean(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
333        return self.__class__.from_xarray(self._data.mean(dim=dim))
def pad( self, pad_width: Mapping[AxisId, Union[int, Tuple[int, int], bioimageio.core.common.PadWidth]], mode: Literal['edge', 'reflect', 'symmetric'] = 'symmetric') -> Self:
335    def pad(
336        self,
337        pad_width: PerAxis[PadWidthLike],
338        mode: PadMode = "symmetric",
339    ) -> Self:
340        pad_width = {a: PadWidth.create(p) for a, p in pad_width.items()}
341        return self.__class__.from_xarray(
342            self._data.pad(pad_width=pad_width, mode=mode)
343        )
def pad_to( self, sizes: Mapping[AxisId, int], pad_where: Union[Literal['left', 'right', 'left_and_right'], Mapping[AxisId, Literal['left', 'right', 'left_and_right']]] = 'left_and_right', mode: Literal['edge', 'reflect', 'symmetric'] = 'symmetric') -> Self:
345    def pad_to(
346        self,
347        sizes: PerAxis[int],
348        pad_where: Union[PadWhere, PerAxis[PadWhere]] = "left_and_right",
349        mode: PadMode = "symmetric",
350    ) -> Self:
351        """pad `tensor` to match `sizes`"""
352        if isinstance(pad_where, str):
353            pad_axis_where: PerAxis[PadWhere] = {a: pad_where for a in self.dims}
354        else:
355            pad_axis_where = pad_where
356
357        pad_width: Dict[AxisId, PadWidth] = {}
358        for a, s_is in self.sizes.items():
359            if a not in sizes or sizes[a] == s_is:
360                pad_width[a] = PadWidth(0, 0)
361            elif s_is > sizes[a]:
362                pad_width[a] = PadWidth(0, 0)
363                logger.warning(
364                    "Cannot pad axis {} of size {} to smaller size {}",
365                    a,
366                    s_is,
367                    sizes[a],
368                )
369            elif a not in pad_axis_where:
370                raise ValueError(
371                    f"Don't know where to pad axis {a}, `pad_where`={pad_where}"
372                )
373            else:
374                pad_this_axis_where = pad_axis_where[a]
375                d = sizes[a] - s_is
376                if pad_this_axis_where == "left":
377                    pad_width[a] = PadWidth(d, 0)
378                elif pad_this_axis_where == "right":
379                    pad_width[a] = PadWidth(0, d)
380                elif pad_this_axis_where == "left_and_right":
381                    pad_width[a] = PadWidth(left := d // 2, d - left)
382                else:
383                    assert_never(pad_this_axis_where)
384
385        return self.pad(pad_width, mode)

pad tensor to match sizes
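
A sketch padding the spatial axes of a hypothetical tensor up to 256:

import numpy as np

from bioimageio.core import AxisId, Tensor

t = Tensor.from_numpy(np.ones((1, 3, 250, 250)), dims=("b", "c", "y", "x"))

padded = t.pad_to(
    {AxisId("y"): 256, AxisId("x"): 256},
    pad_where="left_and_right",
    mode="reflect",  # mirror values at the border
)
print(dict(padded.sizes))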

def quantile( self, q: Union[float, Sequence[float]], dim: Union[AxisId, Sequence[AxisId], NoneType] = None) -> Self:
387    def quantile(
388        self,
389        q: Union[float, Sequence[float]],
390        dim: Optional[Union[AxisId, Sequence[AxisId]]] = None,
391    ) -> Self:
392        assert (
393            isinstance(q, (float, int))
394            and q >= 0.0
395            or not isinstance(q, (float, int))
396            and all(qq >= 0.0 for qq in q)
397        )
398        assert (
399            isinstance(q, (float, int))
400            and q <= 1.0
401            or not isinstance(q, (float, int))
402            and all(qq <= 1.0 for qq in q)
403        )
404        assert dim is None or (
405            (quantile_dim := AxisId("quantile")) != dim and quantile_dim not in set(dim)
406        )
407        return self.__class__.from_xarray(self._data.quantile(q, dim=dim))
def resize_to( self, sizes: Mapping[AxisId, int], *, pad_where: Union[Literal['left', 'right', 'left_and_right'], Mapping[AxisId, Literal['left', 'right', 'left_and_right']]] = 'left_and_right', crop_where: Union[Literal['left', 'right', 'left_and_right'], Mapping[AxisId, Literal['left', 'right', 'left_and_right']]] = 'left_and_right', pad_mode: Literal['edge', 'reflect', 'symmetric'] = 'symmetric'):
409    def resize_to(
410        self,
411        sizes: PerAxis[int],
412        *,
413        pad_where: Union[
414            PadWhere,
415            PerAxis[PadWhere],
416        ] = "left_and_right",
417        crop_where: Union[
418            CropWhere,
419            PerAxis[CropWhere],
420        ] = "left_and_right",
421        pad_mode: PadMode = "symmetric",
422    ):
423        """return cropped/padded tensor with `sizes`"""
424        crop_to_sizes: Dict[AxisId, int] = {}
425        pad_to_sizes: Dict[AxisId, int] = {}
426        new_axes = dict(sizes)
427        for a, s_is in self.sizes.items():
428            a = AxisId(str(a))
429            _ = new_axes.pop(a, None)
430            if a not in sizes or sizes[a] == s_is:
431                pass
432            elif s_is > sizes[a]:
433                crop_to_sizes[a] = sizes[a]
434            else:
435                pad_to_sizes[a] = sizes[a]
436
437        tensor = self
438        if crop_to_sizes:
439            tensor = tensor.crop_to(crop_to_sizes, crop_where=crop_where)
440
441        if pad_to_sizes:
442            tensor = tensor.pad_to(pad_to_sizes, pad_where=pad_where, mode=pad_mode)
443
444        if new_axes:
445            tensor = tensor.expand_dims(new_axes)
446
447        return tensor

return cropped/padded tensor with sizes
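
A sketch where one axis is cropped and another padded in a single call:

import numpy as np

from bioimageio.core import AxisId, Tensor

t = Tensor.from_numpy(np.zeros((1, 3, 256, 256)), dims=("b", "c", "y", "x"))

# y is cropped (256 -> 128) while x is padded (256 -> 512)
resized = t.resize_to({AxisId("y"): 128, AxisId("x"): 512})
print(dict(resized.sizes))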

def std( self, dim: Union[AxisId, Sequence[AxisId], NoneType] = None) -> Self:
449    def std(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
450        return self.__class__.from_xarray(self._data.std(dim=dim))
def sum( self, dim: Union[AxisId, Sequence[AxisId], NoneType] = None) -> Self:
452    def sum(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
453        """Reduce this Tensor's data by applying sum along some dimension(s)."""
454        return self.__class__.from_xarray(self._data.sum(dim=dim))

Reduce this Tensor's data by applying sum along some dimension(s).
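
Like mean and std above (and var below), sum reduces along the given axis id(s); a quick sketch:

import numpy as np

from bioimageio.core import AxisId, Tensor

t = Tensor.from_numpy(np.arange(12).reshape(3, 4), dims=("y", "x"))

row_sums = t.sum(dim=AxisId("x"))  # reduce along x, keeping y
total = t.sum()                    # reduce over all axes -> 0-d tensor
print(total.item())                # 66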

def transpose(self, axes: Sequence[AxisId]) -> Self:
456    def transpose(
457        self,
458        axes: Sequence[AxisId],
459    ) -> Self:
460        """return a transposed tensor
461
462        Args:
463            axes: the desired tensor axes
464        """
465        # expand missing tensor axes
466        missing_axes = tuple(a for a in axes if a not in self.dims)
467        array = self._data
468        if missing_axes:
469            array = array.expand_dims(missing_axes)
470
471        # transpose to the correct axis order
472        return self.__class__.from_xarray(array.transpose(*axes))

return a transposed tensor

Arguments:
  • axes: the desired tensor axes
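
A sketch that also demonstrates the implicit axis expansion:

import numpy as np

from bioimageio.core import AxisId, Tensor

t = Tensor.from_numpy(np.zeros((256, 256, 3)), dims=("y", "x", "c"))

# reorder to channel-first; the missing 'b' axis is inserted with length 1
order = [AxisId("b"), AxisId("c"), AxisId("y"), AxisId("x")]
t_bcyx = t.transpose(order)
print(dict(t_bcyx.sizes))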
def var( self, dim: Union[AxisId, Sequence[AxisId], NoneType] = None) -> Self:
474    def var(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
475        return self.__class__.from_xarray(self._data.var(dim=dim))
def test_description( source: Union[ResourceDescr, PermissiveFileSource, Dict[str, YamlValue]], *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', weight_format: Optional[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_saved_model_bundle', 'torchscript']] = None, devices: Optional[Sequence[str]] = None, determinism: Literal['seed_only', 'full'] = 'seed_only', expected_type: Optional[str] = None, sha256: Optional[bioimageio.spec._internal.io_basics.Sha256] = None, stop_early: bool = True, runtime_env: Union[Literal['currently-active', 'as-described'], pathlib.Path, bioimageio.spec.BioimageioCondaEnv] = 'currently-active', run_command: Callable[[Sequence[str]], NoneType] = <function default_run_command>, **deprecated: Unpack[bioimageio.core._resource_tests.DeprecatedKwargs]) -> ValidationSummary:
197def test_description(
198    source: Union[ResourceDescr, PermissiveFileSource, BioimageioYamlContent],
199    *,
200    format_version: Union[FormatVersionPlaceholder, str] = "discover",
201    weight_format: Optional[SupportedWeightsFormat] = None,
202    devices: Optional[Sequence[str]] = None,
203    determinism: Literal["seed_only", "full"] = "seed_only",
204    expected_type: Optional[str] = None,
205    sha256: Optional[Sha256] = None,
206    stop_early: bool = True,
207    runtime_env: Union[
208        Literal["currently-active", "as-described"], Path, BioimageioCondaEnv
209    ] = ("currently-active"),
210    run_command: Callable[[Sequence[str]], None] = default_run_command,
211    **deprecated: Unpack[DeprecatedKwargs],
212) -> ValidationSummary:
213    """Test a bioimage.io resource dynamically,
214    for example run prediction of test tensors for models.
215
216    Args:
217        source: model description source.
218        weight_format: Weight format to test.
219            Default: All weight formats present in **source**.
220        devices: Devices to test with, e.g. 'cpu', 'cuda'.
221            Default (may be weight format dependent): ['cuda'] if available, ['cpu'] otherwise.
222        determinism: Modes to improve reproducibility of test outputs.
223        expected_type: Assert an expected resource description `type`.
224        sha256: Expected SHA256 value of **source**.
225                (Ignored if **source** already is a loaded `ResourceDescr` object.)
226        stop_early: Do not run further subtests after a failed one.
227        runtime_env: (Experimental feature!) The Python environment to run the tests in
228            - `"currently-active"`: Use active Python interpreter.
229            - `"as-described"`: Use `bioimageio.spec.get_conda_env` to generate a conda
230                environment YAML file based on the model weights description.
231            - A `BioimageioCondaEnv` or a path to a conda environment YAML file.
232                Note: The `bioimageio.core` dependency will be added automatically if not present.
233        run_command: (Experimental feature!) Function to execute (conda) terminal commands in a subprocess.
234            The function should raise an exception if the command fails.
235            **run_command** is ignored if **runtime_env** is `"currently-active"`.
236    """
237    if runtime_env == "currently-active":
238        rd = load_description_and_test(
239            source,
240            format_version=format_version,
241            weight_format=weight_format,
242            devices=devices,
243            determinism=determinism,
244            expected_type=expected_type,
245            sha256=sha256,
246            stop_early=stop_early,
247            **deprecated,
248        )
249        return rd.validation_summary
250
251    if runtime_env == "as-described":
252        conda_env = None
253    elif isinstance(runtime_env, (str, Path)):
254        conda_env = BioimageioCondaEnv.model_validate(read_yaml(Path(runtime_env)))
255    elif isinstance(runtime_env, BioimageioCondaEnv):
256        conda_env = runtime_env
257    else:
258        assert_never(runtime_env)
259
260    td_kwargs: Dict[str, Any] = (
261        dict(ignore_cleanup_errors=True) if sys.version_info >= (3, 10) else {}
262    )
263    with TemporaryDirectory(**td_kwargs) as _d:
264        working_dir = Path(_d)
265        if isinstance(source, (dict, ResourceDescrBase)):
266            file_source = save_bioimageio_package(
267                source, output_path=working_dir / "package.zip"
268            )
269        else:
270            file_source = source
271
272        return _test_in_env(
273            file_source,
274            working_dir=working_dir,
275            weight_format=weight_format,
276            conda_env=conda_env,
277            devices=devices,
278            determinism=determinism,
279            expected_type=expected_type,
280            sha256=sha256,
281            stop_early=stop_early,
282            run_command=run_command,
283            **deprecated,
284        )

Test a bioimage.io resource dynamically, for example run prediction of test tensors for models.

Arguments:
  • source: model description source.
  • weight_format: Weight format to test. Default: All weight formats present in source.
  • devices: Devices to test with, e.g. 'cpu', 'cuda'. Default (may be weight format dependent): ['cuda'] if available, ['cpu'] otherwise.
  • determinism: Modes to improve reproducibility of test outputs.
  • expected_type: Assert an expected resource description type.
  • sha256: Expected SHA256 value of source. (Ignored if source already is a loaded ResourceDescr object.)
  • stop_early: Do not run further subtests after a failed one.
  • runtime_env: (Experimental feature!) The Python environment to run the tests in
    • "currently-active": Use active Python interpreter.
    • "as-described": Use bioimageio.spec.get_conda_env to generate a conda environment YAML file based on the model weights description.
    • A BioimageioCondaEnv or a path to a conda environment YAML file. Note: The bioimageio.core dependency will be added automatically if not present.
  • run_command: (Experimental feature!) Function to execute (conda) terminal commands in a subprocess. The function should raise an exception if the command fails. run_command is ignored if runtime_env is "currently-active".
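
A typical call (the file path is hypothetical; any PermissiveFileSource such as a local rdf.yaml, a package zip, or a URL works):

from bioimageio.core import test_description

summary = test_description(
    "path/to/rdf.yaml",    # hypothetical model source
    weight_format="onnx",  # restrict testing to a single weight format
    devices=["cpu"],
)
summary.display()
print(summary.status)  # 'passed', 'valid-format' or 'failed'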
def test_model( source: Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr, PermissiveFileSource], weight_format: Optional[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_saved_model_bundle', 'torchscript']] = None, devices: Optional[List[str]] = None, *, determinism: Literal['seed_only', 'full'] = 'seed_only', sha256: Optional[bioimageio.spec._internal.io_basics.Sha256] = None, stop_early: bool = True, **deprecated: Unpack[bioimageio.core._resource_tests.DeprecatedKwargs]) -> ValidationSummary:
169def test_model(
170    source: Union[v0_4.ModelDescr, v0_5.ModelDescr, PermissiveFileSource],
171    weight_format: Optional[SupportedWeightsFormat] = None,
172    devices: Optional[List[str]] = None,
173    *,
174    determinism: Literal["seed_only", "full"] = "seed_only",
175    sha256: Optional[Sha256] = None,
176    stop_early: bool = True,
177    **deprecated: Unpack[DeprecatedKwargs],
178) -> ValidationSummary:
179    """Test model inference"""
180    return test_description(
181        source,
182        weight_format=weight_format,
183        devices=devices,
184        determinism=determinism,
185        expected_type="model",
186        sha256=sha256,
187        stop_early=stop_early,
188        **deprecated,
189    )

Test model inference
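
A minimal sketch (the source path is hypothetical):

from bioimageio.core import test_model

summary = test_model("path/to/rdf.yaml")
if summary.status != "passed":
    summary.display()  # inspect which subtest failed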

def test_resource( source: Union[ResourceDescr, PermissiveFileSource, Dict[str, YamlValue]], *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', weight_format: Optional[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_saved_model_bundle', 'torchscript']] = None, devices: Optional[Sequence[str]] = None, determinism: Literal['seed_only', 'full'] = 'seed_only', expected_type: Optional[str] = None, sha256: Optional[bioimageio.spec._internal.io_basics.Sha256] = None, stop_early: bool = True, runtime_env: Union[Literal['currently-active', 'as-described'], pathlib.Path, bioimageio.spec.BioimageioCondaEnv] = 'currently-active', run_command: Callable[[Sequence[str]], NoneType] = <function default_run_command>, **deprecated: Unpack[bioimageio.core._resource_tests.DeprecatedKwargs]) -> ValidationSummary:
197def test_description(
198    source: Union[ResourceDescr, PermissiveFileSource, BioimageioYamlContent],
199    *,
200    format_version: Union[FormatVersionPlaceholder, str] = "discover",
201    weight_format: Optional[SupportedWeightsFormat] = None,
202    devices: Optional[Sequence[str]] = None,
203    determinism: Literal["seed_only", "full"] = "seed_only",
204    expected_type: Optional[str] = None,
205    sha256: Optional[Sha256] = None,
206    stop_early: bool = True,
207    runtime_env: Union[
208        Literal["currently-active", "as-described"], Path, BioimageioCondaEnv
209    ] = ("currently-active"),
210    run_command: Callable[[Sequence[str]], None] = default_run_command,
211    **deprecated: Unpack[DeprecatedKwargs],
212) -> ValidationSummary:
213    """Test a bioimage.io resource dynamically,
214    for example run prediction of test tensors for models.
215
216    Args:
217        source: model description source.
218        weight_format: Weight format to test.
219            Default: All weight formats present in **source**.
220        devices: Devices to test with, e.g. 'cpu', 'cuda'.
221            Default (may be weight format dependent): ['cuda'] if available, ['cpu'] otherwise.
222        determinism: Modes to improve reproducibility of test outputs.
223        expected_type: Assert an expected resource description `type`.
224        sha256: Expected SHA256 value of **source**.
225                (Ignored if **source** already is a loaded `ResourceDescr` object.)
226        stop_early: Do not run further subtests after a failed one.
227        runtime_env: (Experimental feature!) The Python environment to run the tests in
228            - `"currently-active"`: Use active Python interpreter.
229            - `"as-described"`: Use `bioimageio.spec.get_conda_env` to generate a conda
230                environment YAML file based on the model weights description.
231            - A `BioimageioCondaEnv` or a path to a conda environment YAML file.
232                Note: The `bioimageio.core` dependency will be added automatically if not present.
233        run_command: (Experimental feature!) Function to execute (conda) terminal commands in a subprocess.
234            The function should raise an exception if the command fails.
235            **run_command** is ignored if **runtime_env** is `"currently-active"`.
236    """
237    if runtime_env == "currently-active":
238        rd = load_description_and_test(
239            source,
240            format_version=format_version,
241            weight_format=weight_format,
242            devices=devices,
243            determinism=determinism,
244            expected_type=expected_type,
245            sha256=sha256,
246            stop_early=stop_early,
247            **deprecated,
248        )
249        return rd.validation_summary
250
251    if runtime_env == "as-described":
252        conda_env = None
253    elif isinstance(runtime_env, (str, Path)):
254        conda_env = BioimageioCondaEnv.model_validate(read_yaml(Path(runtime_env)))
255    elif isinstance(runtime_env, BioimageioCondaEnv):
256        conda_env = runtime_env
257    else:
258        assert_never(runtime_env)
259
260    td_kwargs: Dict[str, Any] = (
261        dict(ignore_cleanup_errors=True) if sys.version_info >= (3, 10) else {}
262    )
263    with TemporaryDirectory(**td_kwargs) as _d:
264        working_dir = Path(_d)
265        if isinstance(source, (dict, ResourceDescrBase)):
266            file_source = save_bioimageio_package(
267                source, output_path=working_dir / "package.zip"
268            )
269        else:
270            file_source = source
271
272        return _test_in_env(
273            file_source,
274            working_dir=working_dir,
275            weight_format=weight_format,
276            conda_env=conda_env,
277            devices=devices,
278            determinism=determinism,
279            expected_type=expected_type,
280            sha256=sha256,
281            stop_early=stop_early,
282            run_command=run_command,
283            **deprecated,
284        )
def validate_format( data: Dict[str, YamlValue], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', context: Optional[bioimageio.spec.ValidationContext] = None) -> ValidationSummary:
204def validate_format(
205    data: BioimageioYamlContent,
206    /,
207    *,
208    format_version: Union[Literal["discover", "latest"], str] = DISCOVER,
209    context: Optional[ValidationContext] = None,
210) -> ValidationSummary:
211    """Validate a dictionary holding a bioimageio description.
212    See `bioimageio.spec.load_description_and_validate_format_only`
213    to validate a file source.
214
215    Args:
216        data: Dictionary holding the raw bioimageio.yaml content.
217        format_version: Format version to (update to and) use for validation.
218        context: Validation context, see `bioimageio.spec.ValidationContext`
219
220    Note:
221        Use `bioimageio.spec.load_description_and_validate_format_only` to validate a
222        file source instead of loading the YAML content and creating the appropriate
223        `ValidationContext`.
224
225        Alternatively you can use `bioimageio.spec.load_description` and access the
226        `validation_summary` attribute of the returned object.
227    """
228    with context or get_validation_context():
229        rd = build_description(data, format_version=format_version)
230
231    assert rd.validation_summary is not None
232    return rd.validation_summary

Validate a dictionary holding a bioimageio description. See bioimageio.spec.load_description_and_validate_format_only to validate a file source.

Arguments:
  • data: Dictionary holding the raw bioimageio.yaml content.
  • format_version: Format version to (update to and) use for validation.
  • context: Validation context, see bioimageio.spec.ValidationContext
Note:

Use bioimageio.spec.load_description_and_validate_format_only to validate a file source instead of loading the YAML content and creating the appropriate ValidationContext.

Alternatively you can use bioimageio.spec.load_description and access the validation_summary attribute of the returned object.
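
A minimal sketch, assuming pyyaml is available and rdf.yaml is a hypothetical bioimageio description file:

import yaml

from bioimageio.core import validate_format

with open("rdf.yaml", encoding="utf-8") as f:
    data = yaml.safe_load(f)

summary = validate_format(data)
print(summary.status)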

class ValidationSummary(pydantic.main.BaseModel):
239class ValidationSummary(BaseModel, extra="allow"):
240    """Summarizes output of all bioimageio validations and tests
241    for one specific `ResourceDescr` instance."""
242
243    name: str
244    """Name of the validation"""
245    source_name: str
246    """Source of the validated bioimageio description"""
247    id: Optional[str] = None
248    """ID of the resource being validated"""
249    type: str
250    """Type of the resource being validated"""
251    format_version: str
252    """Format version of the resource being validated"""
253    status: Literal["passed", "valid-format", "failed"]
254    """overall status of the bioimageio validation"""
255    metadata_completeness: Annotated[float, annotated_types.Interval(ge=0, le=1)] = 0.0
256    """Estimate of completeness of the metadata in the resource description.
257
258    Note: This completeness estimate may change with subsequent releases
259        and should be considered bioimageio.spec version specific.
260    """
261
262    details: List[ValidationDetail]
263    """List of validation details"""
264    env: Set[InstalledPackage] = Field(
265        default_factory=lambda: {
266            InstalledPackage(
267                name="bioimageio.spec",
268                version=VERSION,
269            )
270        }
271    )
272    """List of selected, relevant package versions"""
273
274    saved_conda_list: Optional[str] = None
275
276    @field_serializer("saved_conda_list")
277    def _save_conda_list(self, value: Optional[str]):
278        return self.conda_list
279
280    @property
281    def conda_list(self):
282        if self.saved_conda_list is None:
283            p = subprocess.run(
284                ["conda", "list"],
285                stdout=subprocess.PIPE,
286                stderr=subprocess.STDOUT,
287                shell=True,
288                text=True,
289            )
290            self.saved_conda_list = (
291                p.stdout or f"`conda list` exited with {p.returncode}"
292            )
293
294        return self.saved_conda_list
295
296    @property
297    def status_icon(self):
298        if self.status == "passed":
299            return "✔️"
300        elif self.status == "valid-format":
301            return "🟡"
302        else:
303            return "❌"
304
305    @property
306    def errors(self) -> List[ErrorEntry]:
307        return list(chain.from_iterable(d.errors for d in self.details))
308
309    @property
310    def warnings(self) -> List[WarningEntry]:
311        return list(chain.from_iterable(d.warnings for d in self.details))
312
313    def format(
314        self,
315        *,
316        width: Optional[int] = None,
317        include_conda_list: bool = False,
318    ):
319        """Format summary as Markdown string"""
320        return self._format(
321            width=width, target="md", include_conda_list=include_conda_list
322        )
323
324    format_md = format
325
326    def format_html(
327        self,
328        *,
329        width: Optional[int] = None,
330        include_conda_list: bool = False,
331    ):
332        md_with_html = self._format(
333            target="html", width=width, include_conda_list=include_conda_list
334        )
335        return markdown.markdown(
336            md_with_html, extensions=["tables", "fenced_code", "nl2br"]
337        )
338
339    def display(
340        self,
341        *,
342        width: Optional[int] = None,
343        include_conda_list: bool = False,
344        tab_size: int = 4,
345        soft_wrap: bool = True,
346    ) -> None:
347        try:  # render as HTML in Jupyter notebook
348            from IPython.core.getipython import get_ipython
349            from IPython.display import (
350                display_html,  # pyright: ignore[reportUnknownVariableType]
351            )
352        except ImportError:
353            pass
354        else:
355            if get_ipython() is not None:
356                _ = display_html(
357                    self.format_html(
358                        width=width, include_conda_list=include_conda_list
359                    ),
360                    raw=True,
361                )
362                return
363
364        # render with rich
365        _ = self._format(
366            target=rich.console.Console(
367                width=width,
368                tab_size=tab_size,
369                soft_wrap=soft_wrap,
370            ),
371            width=width,
372            include_conda_list=include_conda_list,
373        )
374
375    def add_detail(self, detail: ValidationDetail):
376        if detail.status == "failed":
377            self.status = "failed"
378        elif detail.status != "passed":
379            assert_never(detail.status)
380
381        self.details.append(detail)
382
383    def log(
384        self,
385        to: Union[Literal["display"], Path, Sequence[Union[Literal["display"], Path]]],
386    ) -> List[Path]:
387        """Convenience method to display the validation summary in the terminal and/or
388        save it to disk. See `save` for details."""
389        if to == "display":
390            display = True
391            save_to = []
392        elif isinstance(to, Path):
393            display = False
394            save_to = [to]
395        else:
396            display = "display" in to
397            save_to = [p for p in to if p != "display"]
398
399        if display:
400            self.display()
401
402        return self.save(save_to)
403
404    def save(
405        self, path: Union[Path, Sequence[Path]] = Path("{id}_summary_{now}")
406    ) -> List[Path]:
407        """Save the validation/test summary in JSON, Markdown or HTML format.
408
409        Returns:
410            List of file paths the summary was saved to.
411
412        Notes:
413        - Format is chosen based on the suffix: `.json`, `.md`, `.html`.
414        - If **path** has no suffix it is assumed to be a directory to which a
415          `summary.json`, `summary.md` and `summary.html` are saved.
416        """
417        if isinstance(path, (str, Path)):
418            path = [Path(path)]
419
420        # folder to file paths
421        file_paths: List[Path] = []
422        for p in path:
423            if p.suffix:
424                file_paths.append(p)
425            else:
426                file_paths.extend(
427                    [
428                        p / "summary.json",
429                        p / "summary.md",
430                        p / "summary.html",
431                    ]
432                )
433
434        now = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
435        for p in file_paths:
436            p = Path(str(p).format(id=self.id or "bioimageio", now=now))
437            if p.suffix == ".json":
438                self.save_json(p)
439            elif p.suffix == ".md":
440                self.save_markdown(p)
441            elif p.suffix == ".html":
442                self.save_html(p)
443            else:
444                raise ValueError(f"Unknown summary path suffix '{p.suffix}'")
445
446        return file_paths
447
448    def save_json(
449        self, path: Path = Path("summary.json"), *, indent: Optional[int] = 2
450    ):
451        """Save validation/test summary as JSON file."""
452        json_str = self.model_dump_json(indent=indent)
453        path.parent.mkdir(exist_ok=True, parents=True)
454        _ = path.write_text(json_str, encoding="utf-8")
455        logger.info("Saved summary to {}", path.absolute())
456
457    def save_markdown(self, path: Path = Path("summary.md")):
458        """Save rendered validation/test summary as Markdown file."""
459        formatted = self.format_md()
460        path.parent.mkdir(exist_ok=True, parents=True)
461        _ = path.write_text(formatted, encoding="utf-8")
462        logger.info("Saved Markdown formatted summary to {}", path.absolute())
463
464    def save_html(self, path: Path = Path("summary.html")) -> None:
465        """Save rendered validation/test summary as HTML file."""
466        path.parent.mkdir(exist_ok=True, parents=True)
467
468        html = self.format_html()
469        _ = path.write_text(html, encoding="utf-8")
470        logger.info("Saved HTML formatted summary to {}", path.absolute())
471
472    @classmethod
473    def load_json(cls, path: Path) -> Self:
474        """Load validation/test summary from a suitable JSON file"""
475        json_str = Path(path).read_text(encoding="utf-8")
476        return cls.model_validate_json(json_str)
477
478    @field_validator("env", mode="before")
479    def _convert_dict(cls, value: List[Union[List[str], Dict[str, str]]]):
480        """convert old env value for backwards compatibility"""
481        if isinstance(value, list):
482            return [
483                (
484                    (v["name"], v["version"], v.get("build", ""), v.get("channel", ""))
485                    if isinstance(v, dict) and "name" in v and "version" in v
486                    else v
487                )
488                for v in value
489            ]
490        else:
491            return value
492
493    def _format(
494        self,
495        *,
496        target: Union[rich.console.Console, Literal["html", "md"]],
497        width: Optional[int],
498        include_conda_list: bool,
499    ):
500        return _format_summary(
501            self,
502            target=target,
503            width=width or 100,
504            include_conda_list=include_conda_list,
505        )

Summarizes output of all bioimageio validations and tests for one specific ResourceDescr instance.

name: str

Name of the validation

source_name: str

Source of the validated bioimageio description

id: Optional[str]

ID of the resource being validated

type: str

Type of the resource being validated

format_version: str

Format version of the resource being validated

status: Literal['passed', 'valid-format', 'failed']

overall status of the bioimageio validation

metadata_completeness: Annotated[float, Interval(ge=0, le=1)]

Estimate of completeness of the metadata in the resource description.

Note: This completeness estimate may change with subsequent releases and should be considered bioimageio.spec version specific.

details: List[bioimageio.spec.summary.ValidationDetail]

List of validation details

env: Set[bioimageio.spec.summary.InstalledPackage]

List of selected, relevant package versions

saved_conda_list: Optional[str]
conda_list
280    @property
281    def conda_list(self):
282        if self.saved_conda_list is None:
283            p = subprocess.run(
284                ["conda", "list"],
285                stdout=subprocess.PIPE,
286                stderr=subprocess.STDOUT,
287                shell=True,
288                text=True,
289            )
290            self.saved_conda_list = (
291                p.stdout or f"`conda list` exited with {p.returncode}"
292            )
293
294        return self.saved_conda_list
status_icon
296    @property
297    def status_icon(self):
298        if self.status == "passed":
299            return "✔️"
300        elif self.status == "valid-format":
301            return "🟡"
302        else:
303            return "❌"
errors: List[bioimageio.spec.summary.ErrorEntry]
305    @property
306    def errors(self) -> List[ErrorEntry]:
307        return list(chain.from_iterable(d.errors for d in self.details))
warnings: List[bioimageio.spec.summary.WarningEntry]
309    @property
310    def warnings(self) -> List[WarningEntry]:
311        return list(chain.from_iterable(d.warnings for d in self.details))
def format( self, *, width: Optional[int] = None, include_conda_list: bool = False):
313    def format(
314        self,
315        *,
316        width: Optional[int] = None,
317        include_conda_list: bool = False,
318    ):
319        """Format summary as Markdown string"""
320        return self._format(
321            width=width, target="md", include_conda_list=include_conda_list
322        )

Format summary as Markdown string

def format_md( self, *, width: Optional[int] = None, include_conda_list: bool = False):
313    def format(
314        self,
315        *,
316        width: Optional[int] = None,
317        include_conda_list: bool = False,
318    ):
319        """Format summary as Markdown string"""
320        return self._format(
321            width=width, target="md", include_conda_list=include_conda_list
322        )

Format summary as Markdown string

def format_html( self, *, width: Optional[int] = None, include_conda_list: bool = False):
326    def format_html(
327        self,
328        *,
329        width: Optional[int] = None,
330        include_conda_list: bool = False,
331    ):
332        md_with_html = self._format(
333            target="html", width=width, include_conda_list=include_conda_list
334        )
335        return markdown.markdown(
336            md_with_html, extensions=["tables", "fenced_code", "nl2br"]
337        )
def display( self, *, width: Optional[int] = None, include_conda_list: bool = False, tab_size: int = 4, soft_wrap: bool = True) -> None:
339    def display(
340        self,
341        *,
342        width: Optional[int] = None,
343        include_conda_list: bool = False,
344        tab_size: int = 4,
345        soft_wrap: bool = True,
346    ) -> None:
347        try:  # render as HTML in Jupyter notebook
348            from IPython.core.getipython import get_ipython
349            from IPython.display import (
350                display_html,  # pyright: ignore[reportUnknownVariableType]
351            )
352        except ImportError:
353            pass
354        else:
355            if get_ipython() is not None:
356                _ = display_html(
357                    self.format_html(
358                        width=width, include_conda_list=include_conda_list
359                    ),
360                    raw=True,
361                )
362                return
363
364        # render with rich
365        _ = self._format(
366            target=rich.console.Console(
367                width=width,
368                tab_size=tab_size,
369                soft_wrap=soft_wrap,
370            ),
371            width=width,
372            include_conda_list=include_conda_list,
373        )
def add_detail(self, detail: bioimageio.spec.summary.ValidationDetail):
375    def add_detail(self, detail: ValidationDetail):
376        if detail.status == "failed":
377            self.status = "failed"
378        elif detail.status != "passed":
379            assert_never(detail.status)
380
381        self.details.append(detail)
def log( self, to: Union[Literal['display'], pathlib.Path, Sequence[Union[Literal['display'], pathlib.Path]]]) -> List[pathlib.Path]:
383    def log(
384        self,
385        to: Union[Literal["display"], Path, Sequence[Union[Literal["display"], Path]]],
386    ) -> List[Path]:
387        """Convenience method to display the validation summary in the terminal and/or
388        save it to disk. See `save` for details."""
389        if to == "display":
390            display = True
391            save_to = []
392        elif isinstance(to, Path):
393            display = False
394            save_to = [to]
395        else:
396            display = "display" in to
397            save_to = [p for p in to if p != "display"]
398
399        if display:
400            self.display()
401
402        return self.save(save_to)

Convenience method to display the validation summary in the terminal and/or save it to disk. See save for details.

def save( self, path: Union[pathlib.Path, Sequence[pathlib.Path]] = PosixPath('{id}_summary_{now}')) -> List[pathlib.Path]:
404    def save(
405        self, path: Union[Path, Sequence[Path]] = Path("{id}_summary_{now}")
406    ) -> List[Path]:
407        """Save the validation/test summary in JSON, Markdown or HTML format.
408
409        Returns:
410            List of file paths the summary was saved to.
411
412        Notes:
413        - Format is chosen based on the suffix: `.json`, `.md`, `.html`.
414        - If **path** has no suffix it is assumed to be a directory to which a
415          `summary.json`, `summary.md` and `summary.html` are saved.
416        """
417        if isinstance(path, (str, Path)):
418            path = [Path(path)]
419
420        # folder to file paths
421        file_paths: List[Path] = []
422        for p in path:
423            if p.suffix:
424                file_paths.append(p)
425            else:
426                file_paths.extend(
427                    [
428                        p / "summary.json",
429                        p / "summary.md",
430                        p / "summary.html",
431                    ]
432                )
433
434        now = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
435        for p in file_paths:
436            p = Path(str(p).format(id=self.id or "bioimageio", now=now))
437            if p.suffix == ".json":
438                self.save_json(p)
439            elif p.suffix == ".md":
440                self.save_markdown(p)
441            elif p.suffix == ".html":
442                self.save_html(p)
443            else:
444                raise ValueError(f"Unknown summary path suffix '{p.suffix}'")
445
446        return file_paths

Save the validation/test summary in JSON, Markdown or HTML format.

Returns:

List of file paths the summary was saved to.

Notes:

  • Format is chosen based on the suffix: .json, .md, .html.
  • If path has no suffix it is assumed to be a directory to which a summary.json, summary.md and summary.html are saved.
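
A sketch of saving and logging (the model source is hypothetical):

from pathlib import Path

from bioimageio.core import test_description

summary = test_description("path/to/rdf.yaml")

# a suffix-free path is treated as a directory: summary.json,
# summary.md and summary.html are all written into it
paths = summary.save(Path("validation_output"))
print(paths)

# display in the terminal/notebook and also save a Markdown copy
_ = summary.log(["display", Path("validation_output/summary.md")])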
def save_json( self, path: pathlib.Path = PosixPath('summary.json'), *, indent: Optional[int] = 2):
448    def save_json(
449        self, path: Path = Path("summary.json"), *, indent: Optional[int] = 2
450    ):
451        """Save validation/test summary as JSON file."""
452        json_str = self.model_dump_json(indent=indent)
453        path.parent.mkdir(exist_ok=True, parents=True)
454        _ = path.write_text(json_str, encoding="utf-8")
455        logger.info("Saved summary to {}", path.absolute())

Save validation/test summary as JSON file.

def save_markdown(self, path: pathlib.Path = PosixPath('summary.md')):
457    def save_markdown(self, path: Path = Path("summary.md")):
458        """Save rendered validation/test summary as Markdown file."""
459        formatted = self.format_md()
460        path.parent.mkdir(exist_ok=True, parents=True)
461        _ = path.write_text(formatted, encoding="utf-8")
462        logger.info("Saved Markdown formatted summary to {}", path.absolute())

Save rendered validation/test summary as Markdown file.

def save_html(self, path: pathlib.Path = PosixPath('summary.html')) -> None:
464    def save_html(self, path: Path = Path("summary.html")) -> None:
465        """Save rendered validation/test summary as HTML file."""
466        path.parent.mkdir(exist_ok=True, parents=True)
467
468        html = self.format_html()
469        _ = path.write_text(html, encoding="utf-8")
470        logger.info("Saved HTML formatted summary to {}", path.absolute())

Save rendered validation/test summary as HTML file.

@classmethod
def load_json(cls, path: pathlib.Path) -> Self:
472    @classmethod
473    def load_json(cls, path: Path) -> Self:
474        """Load validation/test summary from a suitable JSON file"""
475        json_str = Path(path).read_text(encoding="utf-8")
476        return cls.model_validate_json(json_str)

Load validation/test summary from a suitable JSON file

model_config: ClassVar[pydantic.config.ConfigDict] = {'extra': 'allow'}

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].