bioimageio.spec

  1"""
  2.. include:: ../../README.md
  3"""
  4
  5from . import (
  6    application,
  7    common,
  8    conda_env,
  9    dataset,
 10    generic,
 11    model,
 12    pretty_validation_errors,
 13    summary,
 14    utils,
 15)
 16from ._description import (
 17    LatestResourceDescr,
 18    ResourceDescr,
 19    SpecificResourceDescr,
 20    build_description,
 21    dump_description,
 22    validate_format,
 23)
 24from ._get_conda_env import BioimageioCondaEnv, get_conda_env
 25from ._internal import settings
 26from ._internal.common_nodes import InvalidDescr
 27from ._internal.constants import VERSION
 28from ._internal.validation_context import ValidationContext, get_validation_context
 29from ._io import (
 30    load_dataset_description,
 31    load_description,
 32    load_description_and_validate_format_only,
 33    load_model_description,
 34    save_bioimageio_yaml_only,
 35    update_format,
 36    update_hashes,
 37)
 38from ._package import (
 39    get_resource_package_content,
 40    save_bioimageio_package,
 41    save_bioimageio_package_as_folder,
 42    save_bioimageio_package_to_stream,
 43)
 44from .application import AnyApplicationDescr, ApplicationDescr
 45from .dataset import AnyDatasetDescr, DatasetDescr
 46from .generic import AnyGenericDescr, GenericDescr
 47from .model import AnyModelDescr, ModelDescr
 48from .notebook import AnyNotebookDescr, NotebookDescr
 49from .pretty_validation_errors import enable_pretty_validation_errors_in_ipynb
 50from .summary import ValidationSummary
 51
 52__version__ = VERSION
 53
 54__all__ = [
 55    "__version__",
 56    "AnyApplicationDescr",
 57    "AnyDatasetDescr",
 58    "AnyGenericDescr",
 59    "AnyModelDescr",
 60    "AnyNotebookDescr",
 61    "application",
 62    "ApplicationDescr",
 63    "BioimageioCondaEnv",
 64    "build_description",
 65    "common",
 66    "conda_env",
 67    "dataset",
 68    "DatasetDescr",
 69    "dump_description",
 70    "enable_pretty_validation_errors_in_ipynb",
 71    "generic",
 72    "GenericDescr",
 73    "get_conda_env",
 74    "get_resource_package_content",
 75    "get_validation_context",
 76    "InvalidDescr",
 77    "LatestResourceDescr",
 78    "load_dataset_description",
 79    "load_description_and_validate_format_only",
 80    "load_description",
 81    "load_model_description",
 82    "model",
 83    "ModelDescr",
 84    "NotebookDescr",
 85    "pretty_validation_errors",
 86    "ResourceDescr",
 87    "save_bioimageio_package_as_folder",
 88    "save_bioimageio_package_to_stream",
 89    "save_bioimageio_package",
 90    "save_bioimageio_yaml_only",
 91    "settings",
 92    "SpecificResourceDescr",
 93    "summary",
 94    "update_format",
 95    "update_hashes",
 96    "utils",
 97    "validate_format",
 98    "ValidationContext",
 99    "ValidationSummary",
100]
__version__ = '0.5.4.1'
AnyApplicationDescr = typing.Annotated[typing.Union[bioimageio.spec.application.v0_2.ApplicationDescr, ApplicationDescr], Discriminator(discriminator='format_version')]
AnyDatasetDescr = typing.Annotated[typing.Union[bioimageio.spec.dataset.v0_2.DatasetDescr, DatasetDescr], Discriminator(discriminator='format_version')]
AnyGenericDescr = typing.Annotated[typing.Union[bioimageio.spec.generic.v0_2.GenericDescr, GenericDescr], Discriminator(discriminator='format_version')]
AnyModelDescr = typing.Annotated[typing.Union[bioimageio.spec.model.v0_4.ModelDescr, ModelDescr], Discriminator(discriminator='format_version')]
AnyNotebookDescr = typing.Annotated[typing.Union[bioimageio.spec.notebook.v0_2.NotebookDescr, NotebookDescr], Discriminator(discriminator='format_version')]
class ApplicationDescr(bioimageio.spec.generic.v0_3.GenericDescrBase):
34class ApplicationDescr(GenericDescrBase):
35    """Bioimage.io description of an application."""
36
37    implemented_type: ClassVar[Literal["application"]] = "application"
38    if TYPE_CHECKING:
39        type: Literal["application"] = "application"
40    else:
41        type: Literal["application"]
42
43    id: Optional[ApplicationId] = None
44    """bioimage.io-wide unique resource identifier
45    assigned by bioimage.io; version **un**specific."""
46
47    parent: Optional[ApplicationId] = None
48    """The description from which this one is derived"""
49
50    source: Annotated[
51        Optional[ImportantFileSource],
52        Field(description="URL or path to the source of the application"),
53    ] = None
54    """The primary source of the application"""

Bioimage.io description of an application.

implemented_type: ClassVar[Literal['application']] = 'application'

bioimage.io-wide unique resource identifier assigned by bioimage.io; version unspecific.

The description from which this one is derived

source: Annotated[Optional[ImportantFileSource], Field(description='URL or path to the source of the application')]

The primary source of the application

implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = (0, 3, 0)
def model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None:
124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
125                        """We need to both initialize private attributes and call the user-defined model_post_init
126                        method.
127                        """
128                        init_private_attributes(self, context)
129                        original_model_post_init(self, context)

We need to both initialize private attributes and call the user-defined model_post_init method.

class BioimageioCondaEnv(bioimageio.spec.conda_env.CondaEnv):
 66class BioimageioCondaEnv(CondaEnv):
 67    """A special `CondaEnv` that
 68    - automatically adds bioimageio specific dependencies
 69    - sorts dependencies
 70    """
 71
 72    @model_validator(mode="after")
 73    def _normalize_bioimageio_conda_env(self):
 74        """update a conda env such that we have bioimageio.core and sorted dependencies"""
 75        for req_channel in ("conda-forge", "nodefaults"):
 76            if req_channel not in self.channels:
 77                self.channels.append(req_channel)
 78
 79        if "defaults" in self.channels:
 80            warnings.warn("removing 'defaults' from conda-channels")
 81            self.channels.remove("defaults")
 82
 83        if "pip" not in self.dependencies:
 84            self.dependencies.append("pip")
 85
 86        for dep in self.dependencies:
 87            if isinstance(dep, PipDeps):
 88                pip_section = dep
 89                pip_section.pip.sort()
 90                break
 91        else:
 92            pip_section = None
 93
 94        if (
 95            pip_section is None
 96            or not any(pd.startswith("bioimageio.core") for pd in pip_section.pip)
 97        ) and not any(
 98            d.startswith("bioimageio.core")
 99            or d.startswith("conda-forge::bioimageio.core")
100            for d in self.dependencies
101            if not isinstance(d, PipDeps)
102        ):
103            self.dependencies.append("conda-forge::bioimageio.core")
104
105        self.dependencies.sort()
106        return self

A special CondaEnv that

  • automatically adds bioimageio specific dependencies
  • sorts dependencies
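
A minimal usage sketch (hedged: it assumes `CondaEnv` accepts `name`, `channels` and `dependencies` as keyword arguments, matching the fields used by the validator above):

    from bioimageio.spec import BioimageioCondaEnv

    # the `_normalize_bioimageio_conda_env` validator adds the 'conda-forge' and
    # 'nodefaults' channels, a 'pip' dependency and 'conda-forge::bioimageio.core'
    env = BioimageioCondaEnv(
        name="my-env",                     # hypothetical environment name
        channels=["conda-forge"],
        dependencies=["python=3.11"],
    )
    print(env.channels)      # ['conda-forge', 'nodefaults']
    print(env.dependencies)  # sorted, includes 'conda-forge::bioimageio.core' and 'pip'
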
def build_description(content: Dict[str, YamlValue], /, *, context: Optional[ValidationContext] = None, format_version: Union[Literal['latest', 'discover'], str] = 'discover') -> Union[ResourceDescr, InvalidDescr]:
173def build_description(
174    content: BioimageioYamlContent,
175    /,
176    *,
177    context: Optional[ValidationContext] = None,
178    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
179) -> Union[ResourceDescr, InvalidDescr]:
180    """build a bioimage.io resource description from an RDF's content.
181
182    Use `load_description` if you want to build a resource description from an rdf.yaml
183    or bioimage.io zip-package.
184
185    Args:
186        content: loaded rdf.yaml file (loaded with YAML, not bioimageio.spec)
187        context: validation context to use during validation
188        format_version: (optional) use this argument to load the resource and
189                        convert its metadata to a higher format_version
190
191    Returns:
192        An object holding all metadata of the bioimage.io resource
193
194    """
195
196    return build_description_impl(
197        content,
198        context=context,
199        format_version=format_version,
200        get_rd_class=_get_rd_class,
201    )

build a bioimage.io resource description from an RDF's content.

Use load_description if you want to build a resource description from an rdf.yaml or bioimage.io zip-package.

Arguments:
  • content: loaded rdf.yaml file (loaded with YAML, not bioimageio.spec)
  • context: validation context to use during validation
  • format_version: (optional) use this argument to load the resource and convert its metadata to a higher format_version
Returns:

An object holding all metadata of the bioimage.io resource
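
For illustration, a hedged sketch of typical use; the local rdf.yaml path is a placeholder and any YAML parser can supply the content dict:

    from ruamel.yaml import YAML
    from bioimageio.spec import InvalidDescr, build_description

    with open("rdf.yaml") as f:            # hypothetical local rdf.yaml
        content = YAML(typ="safe").load(f)

    rd = build_description(content)
    if isinstance(rd, InvalidDescr):
        print(rd.validation_summary)       # inspect why validation failed
    else:
        print(type(rd).__name__, rd.name)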

class DatasetDescr(bioimageio.spec.generic.v0_3.GenericDescrBase):
 41class DatasetDescr(GenericDescrBase):
 42    """A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage
 43    processing.
 44    """
 45
 46    implemented_type: ClassVar[Literal["dataset"]] = "dataset"
 47    if TYPE_CHECKING:
 48        type: Literal["dataset"] = "dataset"
 49    else:
 50        type: Literal["dataset"]
 51
 52    id: Optional[DatasetId] = None
 53    """bioimage.io-wide unique resource identifier
 54    assigned by bioimage.io; version **un**specific."""
 55
 56    parent: Optional[DatasetId] = None
 57    """The description from which this one is derived"""
 58
 59    source: Optional[HttpUrl] = None
 60    """"URL to the source of the dataset."""
 61
 62    @model_validator(mode="before")
 63    @classmethod
 64    def _convert(cls, data: Dict[str, Any], /) -> Dict[str, Any]:
 65        if (
 66            data.get("type") == "dataset"
 67            and isinstance(fv := data.get("format_version"), str)
 68            and fv.startswith("0.2.")
 69        ):
 70            old = DatasetDescr02.load(data)
 71            if isinstance(old, InvalidDescr):
 72                return data
 73
 74            return cast(
 75                Dict[str, Any],
 76                (cls if TYPE_CHECKING else dict)(
 77                    attachments=(
 78                        []
 79                        if old.attachments is None
 80                        else [FileDescr(source=f) for f in old.attachments.files]
 81                    ),
 82                    authors=[
 83                        _author_conv.convert_as_dict(a) for a in old.authors
 84                    ],  # pyright: ignore[reportArgumentType]
 85                    badges=old.badges,
 86                    cite=[
 87                        {"text": c.text, "doi": c.doi, "url": c.url} for c in old.cite
 88                    ],  # pyright: ignore[reportArgumentType]
 89                    config=old.config,  # pyright: ignore[reportArgumentType]
 90                    covers=old.covers,
 91                    description=old.description,
 92                    documentation=cast(DocumentationSource, old.documentation),
 93                    format_version="0.3.0",
 94                    git_repo=old.git_repo,  # pyright: ignore[reportArgumentType]
 95                    icon=old.icon,
 96                    id=None if old.id is None else DatasetId(old.id),
 97                    license=old.license,  # type: ignore
 98                    links=old.links,
 99                    maintainers=[
100                        _maintainer_conv.convert_as_dict(m) for m in old.maintainers
101                    ],  # pyright: ignore[reportArgumentType]
102                    name=old.name,
103                    source=old.source,
104                    tags=old.tags,
105                    type=old.type,
106                    uploader=old.uploader,
107                    version=old.version,
108                    **(old.model_extra or {}),
109                ),
110            )
111
112        return data

A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage processing.

implemented_type: ClassVar[Literal['dataset']] = 'dataset'

bioimage.io-wide unique resource identifier assigned by bioimage.io; version unspecific.

The description from which this one is derived

source: Optional[bioimageio.spec._internal.url.HttpUrl]

"URL to the source of the dataset.

implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = (0, 3, 0)
def model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None:
124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
125                        """We need to both initialize private attributes and call the user-defined model_post_init
126                        method.
127                        """
128                        init_private_attributes(self, context)
129                        original_model_post_init(self, context)

We need to both initialize private attributes and call the user-defined model_post_init method.

def dump_description(rd: Union[ResourceDescr, InvalidDescr], /, *, exclude_unset: bool = True, exclude_defaults: bool = False) -> Dict[str, YamlValue]:
66def dump_description(
67    rd: Union[ResourceDescr, InvalidDescr],
68    /,
69    *,
70    exclude_unset: bool = True,
71    exclude_defaults: bool = False,
72) -> BioimageioYamlContent:
73    """Converts a resource to a dictionary containing only simple types that can directly be serialzed to YAML.
74
75    Args:
76        rd: bioimageio resource description
77        exclude_unset: Exclude fields that have not been explicitly set.
78        exclude_defaults: Exclude fields that have the default value (even if set explicitly).
79    """
80    return rd.model_dump(
81        mode="json", exclude_unset=exclude_unset, exclude_defaults=exclude_defaults
82    )

Converts a resource to a dictionary containing only simple types that can directly be serialized to YAML.

Arguments:
  • rd: bioimageio resource description
  • exclude_unset: Exclude fields that have not been explicitly set.
  • exclude_defaults: Exclude fields that have the default value (even if set explicitly).
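
A short round-trip sketch (the local path is a placeholder; loading is assumed to succeed):

    from bioimageio.spec import dump_description, load_description

    rd = load_description("rdf.yaml")   # hypothetical local description
    content = dump_description(rd)      # plain dict with YAML-compatible values only
    print(sorted(content))              # field names that were explicitly set
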
def enable_pretty_validation_errors_in_ipynb():
72    def enable_pretty_validation_errors_in_ipynb():
73        """A modestly hacky way to display prettified validaiton error messages and traceback
74        in interactive Python notebooks"""
75        ipy = get_ipython()
76        if ipy is not None:
77            ipy.set_custom_exc((ValidationError,), _custom_exception_handler)

A modestly hacky way to display prettified validation error messages and traceback in interactive Python notebooks
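
Typically called once at the top of a notebook; outside IPython it has no effect (sketch):

    from bioimageio.spec import enable_pretty_validation_errors_in_ipynb

    # registers a custom IPython exception handler for pydantic.ValidationError
    enable_pretty_validation_errors_in_ipynb()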

class GenericDescr(bioimageio.spec.generic.v0_3.GenericDescrBase):
476class GenericDescr(GenericDescrBase, extra="ignore"):
477    """Specification of the fields used in a generic bioimage.io-compliant resource description file (RDF).
478
479    An RDF is a YAML file that describes a resource such as a model, a dataset, or a notebook.
480    Note that those resources are described with a type-specific RDF.
481    Use this generic resource description, if none of the known specific types matches your resource.
482    """
483
484    type: Annotated[str, LowerCase] = Field("generic", frozen=True)
485    """The resource type assigns a broad category to the resource."""
486
487    id: Optional[
488        Annotated[ResourceId, Field(examples=["affable-shark", "ambitious-sloth"])]
489    ] = None
490    """bioimage.io-wide unique resource identifier
491    assigned by bioimage.io; version **un**specific."""
492
493    parent: Optional[ResourceId] = None
494    """The description from which this one is derived"""
495
496    source: Optional[HttpUrl] = None
497    """The primary source of the resource"""
498
499    @field_validator("type", mode="after")
500    @classmethod
501    def check_specific_types(cls, value: str) -> str:
502        if value in KNOWN_SPECIFIC_RESOURCE_TYPES:
503            raise ValueError(
504                f"Use the {value} description instead of this generic description for"
505                + f" your '{value}' resource."
506            )
507
508        return value

Specification of the fields used in a generic bioimage.io-compliant resource description file (RDF).

An RDF is a YAML file that describes a resource such as a model, a dataset, or a notebook. Note that those resources are described with a type-specific RDF. Use this generic resource description, if none of the known specific types matches your resource.

type: Annotated[str, LowerCase]

The resource type assigns a broad category to the resource.

id: Optional[Annotated[ResourceId, Field(examples=['affable-shark', 'ambitious-sloth'])]]

bioimage.io-wide unique resource identifier assigned by bioimage.io; version unspecific.

The description from which this one is derived

source: Optional[bioimageio.spec._internal.url.HttpUrl]

The primary source of the resource

@field_validator('type', mode='after')
@classmethod
def check_specific_types(cls, value: str) -> str:
499    @field_validator("type", mode="after")
500    @classmethod
501    def check_specific_types(cls, value: str) -> str:
502        if value in KNOWN_SPECIFIC_RESOURCE_TYPES:
503            raise ValueError(
504                f"Use the {value} description instead of this generic description for"
505                + f" your '{value}' resource."
506            )
507
508        return value
implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = (0, 3, 0)
def model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None:
124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
125                        """We need to both initialize private attributes and call the user-defined model_post_init
126                        method.
127                        """
128                        init_private_attributes(self, context)
129                        original_model_post_init(self, context)

We need to both initialize private attributes and call the user-defined model_post_init method.

28def get_conda_env(
29    *,
30    entry: SupportedWeightsEntry,
31    env_name: Optional[Union[Literal["DROP"], str]] = None,
32) -> BioimageioCondaEnv:
33    """get the recommended Conda environment for a given weights entry description"""
34    if isinstance(entry, (v0_4.OnnxWeightsDescr, v0_5.OnnxWeightsDescr)):
35        conda_env = _get_default_onnx_env(opset_version=entry.opset_version)
36    elif isinstance(
37        entry,
38        (
39            v0_4.PytorchStateDictWeightsDescr,
40            v0_5.PytorchStateDictWeightsDescr,
41            v0_4.TorchscriptWeightsDescr,
42            v0_5.TorchscriptWeightsDescr,
43        ),
44    ):
45        if (
46            isinstance(entry, v0_5.TorchscriptWeightsDescr)
47            or entry.dependencies is None
48        ):
49            conda_env = _get_default_pytorch_env(pytorch_version=entry.pytorch_version)
50        else:
51            conda_env = _get_env_from_deps(entry.dependencies)
52
53    elif isinstance(
54        entry,
55        (
56            v0_4.TensorflowSavedModelBundleWeightsDescr,
57            v0_5.TensorflowSavedModelBundleWeightsDescr,
58        ),
59    ):
60        if entry.dependencies is None:
61            conda_env = _get_default_tf_env(tensorflow_version=entry.tensorflow_version)
62        else:
63            conda_env = _get_env_from_deps(entry.dependencies)
64    elif isinstance(
65        entry,
66        (v0_4.KerasHdf5WeightsDescr, v0_5.KerasHdf5WeightsDescr),
67    ):
68        conda_env = _get_default_tf_env(tensorflow_version=entry.tensorflow_version)
69    else:
70        assert_never(entry)
71
72    if env_name == "DROP":
73        conda_env.name = None
74    elif env_name is not None:
75        conda_env.name = env_name
76
77    return conda_env

get the recommended Conda environment for a given weights entry description
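
A hedged sketch; it assumes the loaded model provides a PyTorch state-dict weights entry (any other supported weights entry works the same way):

    from bioimageio.spec import get_conda_env, load_model_description

    model = load_model_description("model_rdf.yaml")   # hypothetical local model description
    entry = model.weights.pytorch_state_dict           # assumed to be present for this model
    if entry is not None:
        env = get_conda_env(entry=entry, env_name="my-model-env")
        print(env.dependencies)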

def get_resource_package_content(rd: ResourceDescr, /, *, bioimageio_yaml_file_name: str = 'rdf.yaml', weights_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None) -> Dict[str, Union[HttpUrl, AbsoluteFilePath, Dict[str, YamlValue], ZipPath]]:
31def get_resource_package_content(
32    rd: ResourceDescr,
33    /,
34    *,
35    bioimageio_yaml_file_name: FileName = BIOIMAGEIO_YAML,
36    weights_priority_order: Optional[Sequence[WeightsFormat]] = None,  # model only
37) -> Dict[FileName, Union[HttpUrl, AbsoluteFilePath, BioimageioYamlContent, ZipPath]]:
38    """
39    Args:
40        rd: resource description
41        bioimageio_yaml_file_name: RDF file name
42        # for model resources only:
43        weights_priority_order: If given, only the first weights format present in the model is included.
44                                If none of the prioritized weights formats is found a ValueError is raised.
45    """
46    os_friendly_name = get_os_friendly_file_name(rd.name)
47    bioimageio_yaml_file_name = bioimageio_yaml_file_name.format(
48        name=os_friendly_name, type=rd.type
49    )
50
51    bioimageio_yaml_file_name = ensure_is_valid_bioimageio_yaml_name(
52        bioimageio_yaml_file_name
53    )
54    content: Dict[FileName, Union[HttpUrl, AbsoluteFilePath, ZipPath]] = {}
55    with PackagingContext(
56        bioimageio_yaml_file_name=bioimageio_yaml_file_name,
57        file_sources=content,
58        weights_priority_order=weights_priority_order,
59    ):
60        rdf_content: BioimageioYamlContent = rd.model_dump(
61            mode="json", exclude_unset=True
62        )
63
64    _ = rdf_content.pop("rdf_source", None)
65
66    return {**content, bioimageio_yaml_file_name: rdf_content}

Arguments:
  • rd: resource description
  • bioimageio_yaml_file_name: RDF file name
  • weights_priority_order: (model resources only) If given, only the first weights format present in the model is included. If none of the prioritized weights formats is found a ValueError is raised.
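
A sketch of inspecting the would-be package content (the local path is a placeholder):

    from bioimageio.spec import get_resource_package_content, load_model_description

    model = load_model_description("model_rdf.yaml")   # hypothetical local model description
    content = get_resource_package_content(model)
    for file_name, src in content.items():
        # 'rdf.yaml' maps to the serialized description; other entries are file sources
        print(file_name, type(src).__name__)
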
def get_validation_context( default: Optional[ValidationContext] = None) -> ValidationContext:
169def get_validation_context(
170    default: Optional[ValidationContext] = None,
171) -> ValidationContext:
172    """Get the currently active validation context (or a default)"""
173    return _validation_context_var.get() or default or ValidationContext()

Get the currently active validation context (or a default)
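
ValidationContext can be entered as a context manager so that nested calls pick it up via get_validation_context (a sketch; the context-manager usage is assumed here):

    from bioimageio.spec import ValidationContext, get_validation_context, load_description

    with ValidationContext(perform_io_checks=False):
        assert not get_validation_context().perform_io_checks
        # validation inside this block skips downloads and file hashing
        rd = load_description("rdf.yaml")  # hypothetical local description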

class InvalidDescr(bioimageio.spec._internal.common_nodes.ResourceDescrBase):
356class InvalidDescr(
357    ResourceDescrBase,
358    extra="allow",
359    title="An invalid resource description",
360):
361    """A representation of an invalid resource description"""
362
363    implemented_type: ClassVar[Literal["unknown"]] = "unknown"
364    if TYPE_CHECKING:  # see NodeWithExplicitlySetFields
365        type: Any = "unknown"
366    else:
367        type: Any
368
369    implemented_format_version: ClassVar[Literal["unknown"]] = "unknown"
370    if TYPE_CHECKING:  # see NodeWithExplicitlySetFields
371        format_version: Any = "unknown"
372    else:
373        format_version: Any

A representation of an invalid resource description

implemented_type: ClassVar[Literal['unknown']] = 'unknown'
implemented_format_version: ClassVar[Literal['unknown']] = 'unknown'
implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = (0, 0, 0)
def model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None:
124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
125                        """We need to both initialize private attributes and call the user-defined model_post_init
126                        method.
127                        """
128                        init_private_attributes(self, context)
129                        original_model_post_init(self, context)

We need to both initialize private attributes and call the user-defined model_post_init method.

LatestResourceDescr = typing.Union[typing.Annotated[typing.Union[ApplicationDescr, DatasetDescr, ModelDescr, NotebookDescr], Discriminator(discriminator='type')], GenericDescr]
def load_dataset_description(source: Union[PermissiveFileSource, ZipFile], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', perform_io_checks: Optional[bool] = None, known_files: Optional[Dict[str, Sha256]] = None, sha256: Optional[Sha256] = None) -> AnyDatasetDescr:
177def load_dataset_description(
178    source: Union[PermissiveFileSource, ZipFile],
179    /,
180    *,
181    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
182    perform_io_checks: Optional[bool] = None,
183    known_files: Optional[Dict[str, Sha256]] = None,
184    sha256: Optional[Sha256] = None,
185) -> AnyDatasetDescr:
186    """same as `load_description`, but addtionally ensures that the loaded
187    description is valid and of type 'dataset'.
188    """
189    rd = load_description(
190        source,
191        format_version=format_version,
192        perform_io_checks=perform_io_checks,
193        known_files=known_files,
194        sha256=sha256,
195    )
196    return ensure_description_is_dataset(rd)

same as load_description, but additionally ensures that the loaded description is valid and of type 'dataset'.
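
A minimal sketch (the path is a placeholder; non-dataset or invalid resources raise a ValueError):

    from bioimageio.spec import load_dataset_description

    dataset = load_dataset_description("dataset_rdf.yaml")  # hypothetical local dataset description
    print(dataset.name, dataset.license)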

def load_description_and_validate_format_only(source: Union[PermissiveFileSource, ZipFile], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', perform_io_checks: Optional[bool] = None, known_files: Optional[Dict[str, Sha256]] = None, sha256: Optional[Sha256] = None) -> ValidationSummary:
229def load_description_and_validate_format_only(
230    source: Union[PermissiveFileSource, ZipFile],
231    /,
232    *,
233    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
234    perform_io_checks: Optional[bool] = None,
235    known_files: Optional[Dict[str, Sha256]] = None,
236    sha256: Optional[Sha256] = None,
237) -> ValidationSummary:
238    """same as `load_description`, but only return the validation summary.
239
240    Returns:
241        Validation summary of the bioimage.io resource found at `source`.
242
243    """
244    rd = load_description(
245        source,
246        format_version=format_version,
247        perform_io_checks=perform_io_checks,
248        known_files=known_files,
249        sha256=sha256,
250    )
251    assert rd.validation_summary is not None
252    return rd.validation_summary

same as load_description, but only return the validation summary.

Returns:

Validation summary of the bioimage.io resource found at source.
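
A sketch of running a format-only check and inspecting the returned summary:

    from bioimageio.spec import load_description_and_validate_format_only

    summary = load_description_and_validate_format_only("rdf.yaml")  # hypothetical path or URL
    print(summary.status)  # overall validation status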

def load_description(source: Union[PermissiveFileSource, ZipFile], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', perform_io_checks: Optional[bool] = None, known_files: Optional[Dict[str, Sha256]] = None, sha256: Optional[Sha256] = None) -> Union[ResourceDescr, InvalidDescr]:
 56def load_description(
 57    source: Union[PermissiveFileSource, ZipFile],
 58    /,
 59    *,
 60    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
 61    perform_io_checks: Optional[bool] = None,
 62    known_files: Optional[Dict[str, Sha256]] = None,
 63    sha256: Optional[Sha256] = None,
 64) -> Union[ResourceDescr, InvalidDescr]:
 65    """load a bioimage.io resource description
 66
 67    Args:
 68        source: Path or URL to an rdf.yaml or a bioimage.io package
 69                (zip-file with rdf.yaml in it).
 70        format_version: (optional) Use this argument to load the resource and
 71                        convert its metadata to a higher format_version.
 72        perform_io_checks: Whether or not to perform validation that requires file IO,
 73                           e.g. downloading remote files. The existence of local
 74                           absolute file paths is still being checked.
 75        known_files: Allows bypassing download and hashing of referenced files
 76                     (even if perform_io_checks is True).
 77        sha256: Optional SHA-256 value of **source**
 78
 79    Returns:
 80        An object holding all metadata of the bioimage.io resource
 81
 82    """
 83    if isinstance(source, ResourceDescrBase):
 84        name = getattr(source, "name", f"{str(source)[:10]}...")
 85        logger.warning("returning already loaded description '{}' as is", name)
 86        return source  # pyright: ignore[reportReturnType]
 87
 88    opened = open_bioimageio_yaml(source, sha256=sha256)
 89
 90    context = get_validation_context().replace(
 91        root=opened.original_root,
 92        file_name=opened.original_file_name,
 93        perform_io_checks=perform_io_checks,
 94        known_files=known_files,
 95    )
 96
 97    return build_description(
 98        opened.content,
 99        context=context,
100        format_version=format_version,
101    )

load a bioimage.io resource description

Arguments:
  • source: Path or URL to an rdf.yaml or a bioimage.io package (zip-file with rdf.yaml in it).
  • format_version: (optional) Use this argument to load the resource and convert its metadata to a higher format_version.
  • perform_io_checks: Whether or not to perform validation that requires file IO, e.g. downloading remote files. The existence of local absolute file paths is still being checked.
  • known_files: Allows bypassing download and hashing of referenced files (even if perform_io_checks is True).
  • sha256: Optional SHA-256 value of source
Returns:

An object holding all metadata of the bioimage.io resource
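
A hedged end-to-end sketch (the URL is a placeholder; a local path or zip package works too):

    from bioimageio.spec import InvalidDescr, load_description

    rd = load_description("https://example.com/rdf.yaml")  # hypothetical URL
    if isinstance(rd, InvalidDescr):
        print(rd.validation_summary)
    else:
        print(f"loaded {rd.type} '{rd.name}'")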

def load_model_description(source: Union[PermissiveFileSource, ZipFile], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', perform_io_checks: Optional[bool] = None, known_files: Optional[Dict[str, Sha256]] = None, sha256: Optional[Sha256] = None) -> AnyModelDescr:
128def load_model_description(
129    source: Union[PermissiveFileSource, ZipFile],
130    /,
131    *,
132    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
133    perform_io_checks: Optional[bool] = None,
134    known_files: Optional[Dict[str, Sha256]] = None,
135    sha256: Optional[Sha256] = None,
136) -> AnyModelDescr:
137    """same as `load_description`, but addtionally ensures that the loaded
138    description is valid and of type 'model'.
139
140    Raises:
141        ValueError: for invalid or non-model resources
142    """
143    rd = load_description(
144        source,
145        format_version=format_version,
146        perform_io_checks=perform_io_checks,
147        known_files=known_files,
148        sha256=sha256,
149    )
150    return ensure_description_is_model(rd)

same as load_description, but additionally ensures that the loaded description is valid and of type 'model'.

Raises:
  • ValueError: for invalid or non-model resources
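
A short sketch (the path is a placeholder; the 0.5 field layout with tensor and axis ids is assumed for the loaded model):

    from bioimageio.spec import load_model_description

    model = load_model_description("model_rdf.yaml")  # hypothetical local model description
    print(model.name)
    for ipt in model.inputs:
        print(ipt.id, [a.id for a in ipt.axes])  # input tensor ids and their axis ids (0.5 layout)
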
2512class ModelDescr(GenericModelDescrBase):
2513    """Specification of the fields used in a bioimage.io-compliant RDF to describe AI models with pretrained weights.
2514    These fields are typically stored in a YAML file which we call a model resource description file (model RDF).
2515    """
2516
2517    implemented_format_version: ClassVar[Literal["0.5.4"]] = "0.5.4"
2518    if TYPE_CHECKING:
2519        format_version: Literal["0.5.4"] = "0.5.4"
2520    else:
2521        format_version: Literal["0.5.4"]
2522        """Version of the bioimage.io model description specification used.
2523        When creating a new model always use the latest micro/patch version described here.
2524        The `format_version` is important for any consumer software to understand how to parse the fields.
2525        """
2526
2527    implemented_type: ClassVar[Literal["model"]] = "model"
2528    if TYPE_CHECKING:
2529        type: Literal["model"] = "model"
2530    else:
2531        type: Literal["model"]
2532        """Specialized resource type 'model'"""
2533
2534    id: Optional[ModelId] = None
2535    """bioimage.io-wide unique resource identifier
2536    assigned by bioimage.io; version **un**specific."""
2537
2538    authors: NotEmpty[List[Author]]
2539    """The authors are the creators of the model RDF and the primary points of contact."""
2540
2541    documentation: Annotated[
2542        DocumentationSource,
2543        Field(
2544            examples=[
2545                "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
2546                "README.md",
2547            ],
2548        ),
2549    ]
2550    """∈📦 URL or relative path to a markdown file with additional documentation.
2551    The recommended documentation file name is `README.md`. An `.md` suffix is mandatory.
2552    The documentation should include a '#[#] Validation' (sub)section
2553    with details on how to quantitatively validate the model on unseen data."""
2554
2555    @field_validator("documentation", mode="after")
2556    @classmethod
2557    def _validate_documentation(cls, value: DocumentationSource) -> DocumentationSource:
2558        if not get_validation_context().perform_io_checks:
2559            return value
2560
2561        doc_path = download(value).path
2562        doc_content = doc_path.read_text(encoding="utf-8")
2563        assert isinstance(doc_content, str)
2564        if not re.search("#.*[vV]alidation", doc_content):
2565            issue_warning(
2566                "No '# Validation' (sub)section found in {value}.",
2567                value=value,
2568                field="documentation",
2569            )
2570
2571        return value
2572
2573    inputs: NotEmpty[Sequence[InputTensorDescr]]
2574    """Describes the input tensors expected by this model."""
2575
2576    @field_validator("inputs", mode="after")
2577    @classmethod
2578    def _validate_input_axes(
2579        cls, inputs: Sequence[InputTensorDescr]
2580    ) -> Sequence[InputTensorDescr]:
2581        input_size_refs = cls._get_axes_with_independent_size(inputs)
2582
2583        for i, ipt in enumerate(inputs):
2584            valid_independent_refs: Dict[
2585                Tuple[TensorId, AxisId],
2586                Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
2587            ] = {
2588                **{
2589                    (ipt.id, a.id): (ipt, a, a.size)
2590                    for a in ipt.axes
2591                    if not isinstance(a, BatchAxis)
2592                    and isinstance(a.size, (int, ParameterizedSize))
2593                },
2594                **input_size_refs,
2595            }
2596            for a, ax in enumerate(ipt.axes):
2597                cls._validate_axis(
2598                    "inputs",
2599                    i=i,
2600                    tensor_id=ipt.id,
2601                    a=a,
2602                    axis=ax,
2603                    valid_independent_refs=valid_independent_refs,
2604                )
2605        return inputs
2606
2607    @staticmethod
2608    def _validate_axis(
2609        field_name: str,
2610        i: int,
2611        tensor_id: TensorId,
2612        a: int,
2613        axis: AnyAxis,
2614        valid_independent_refs: Dict[
2615            Tuple[TensorId, AxisId],
2616            Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
2617        ],
2618    ):
2619        if isinstance(axis, BatchAxis) or isinstance(
2620            axis.size, (int, ParameterizedSize, DataDependentSize)
2621        ):
2622            return
2623        elif not isinstance(axis.size, SizeReference):
2624            assert_never(axis.size)
2625
2626        # validate axis.size SizeReference
2627        ref = (axis.size.tensor_id, axis.size.axis_id)
2628        if ref not in valid_independent_refs:
2629            raise ValueError(
2630                "Invalid tensor axis reference at"
2631                + f" {field_name}[{i}].axes[{a}].size: {axis.size}."
2632            )
2633        if ref == (tensor_id, axis.id):
2634            raise ValueError(
2635                "Self-referencing not allowed for"
2636                + f" {field_name}[{i}].axes[{a}].size: {axis.size}"
2637            )
2638        if axis.type == "channel":
2639            if valid_independent_refs[ref][1].type != "channel":
2640                raise ValueError(
2641                    "A channel axis' size may only reference another fixed size"
2642                    + " channel axis."
2643                )
2644            if isinstance(axis.channel_names, str) and "{i}" in axis.channel_names:
2645                ref_size = valid_independent_refs[ref][2]
2646                assert isinstance(ref_size, int), (
2647                    "channel axis ref (another channel axis) has to specify fixed"
2648                    + " size"
2649                )
2650                generated_channel_names = [
2651                    Identifier(axis.channel_names.format(i=i))
2652                    for i in range(1, ref_size + 1)
2653                ]
2654                axis.channel_names = generated_channel_names
2655
2656        if (ax_unit := getattr(axis, "unit", None)) != (
2657            ref_unit := getattr(valid_independent_refs[ref][1], "unit", None)
2658        ):
2659            raise ValueError(
2660                "The units of an axis and its reference axis need to match, but"
2661                + f" '{ax_unit}' != '{ref_unit}'."
2662            )
2663        ref_axis = valid_independent_refs[ref][1]
2664        if isinstance(ref_axis, BatchAxis):
2665            raise ValueError(
2666                f"Invalid reference axis '{ref_axis.id}' for {tensor_id}.{axis.id}"
2667                + " (a batch axis is not allowed as reference)."
2668            )
2669
2670        if isinstance(axis, WithHalo):
2671            min_size = axis.size.get_size(axis, ref_axis, n=0)
2672            if (min_size - 2 * axis.halo) < 1:
2673                raise ValueError(
2674                    f"axis {axis.id} with minimum size {min_size} is too small for halo"
2675                    + f" {axis.halo}."
2676                )
2677
2678            input_halo = axis.halo * axis.scale / ref_axis.scale
2679            if input_halo != int(input_halo) or input_halo % 2 == 1:
2680                raise ValueError(
2681                    f"input_halo {input_halo} (output_halo {axis.halo} *"
2682                    + f" output_scale {axis.scale} / input_scale {ref_axis.scale})"
2683                    + f"     {tensor_id}.{axis.id}."
2684                )
2685
2686    @model_validator(mode="after")
2687    def _validate_test_tensors(self) -> Self:
2688        if not get_validation_context().perform_io_checks:
2689            return self
2690
2691        test_output_arrays = [
2692            load_array(descr.test_tensor.download().path) for descr in self.outputs
2693        ]
2694        test_input_arrays = [
2695            load_array(descr.test_tensor.download().path) for descr in self.inputs
2696        ]
2697
2698        tensors = {
2699            descr.id: (descr, array)
2700            for descr, array in zip(
2701                chain(self.inputs, self.outputs), test_input_arrays + test_output_arrays
2702            )
2703        }
2704        validate_tensors(tensors, tensor_origin="test_tensor")
2705
2706        output_arrays = {
2707            descr.id: array for descr, array in zip(self.outputs, test_output_arrays)
2708        }
2709        for rep_tol in self.config.bioimageio.reproducibility_tolerance:
2710            if not rep_tol.absolute_tolerance:
2711                continue
2712
2713            if rep_tol.output_ids:
2714                out_arrays = {
2715                    oid: a
2716                    for oid, a in output_arrays.items()
2717                    if oid in rep_tol.output_ids
2718                }
2719            else:
2720                out_arrays = output_arrays
2721
2722            for out_id, array in out_arrays.items():
2723                if rep_tol.absolute_tolerance > (max_test_value := array.max()) * 0.01:
2724                    raise ValueError(
2725                        "config.bioimageio.reproducibility_tolerance.absolute_tolerance="
2726                        + f"{rep_tol.absolute_tolerance} > 0.01*{max_test_value}"
2727                        + f" (1% of the maximum value of the test tensor '{out_id}')"
2728                    )
2729
2730        return self
2731
2732    @model_validator(mode="after")
2733    def _validate_tensor_references_in_proc_kwargs(self, info: ValidationInfo) -> Self:
2734        ipt_refs = {t.id for t in self.inputs}
2735        out_refs = {t.id for t in self.outputs}
2736        for ipt in self.inputs:
2737            for p in ipt.preprocessing:
2738                ref = p.kwargs.get("reference_tensor")
2739                if ref is None:
2740                    continue
2741                if ref not in ipt_refs:
2742                    raise ValueError(
2743                        f"`reference_tensor` '{ref}' not found. Valid input tensor"
2744                        + f" references are: {ipt_refs}."
2745                    )
2746
2747        for out in self.outputs:
2748            for p in out.postprocessing:
2749                ref = p.kwargs.get("reference_tensor")
2750                if ref is None:
2751                    continue
2752
2753                if ref not in ipt_refs and ref not in out_refs:
2754                    raise ValueError(
2755                        f"`reference_tensor` '{ref}' not found. Valid tensor references"
2756                        + f" are: {ipt_refs | out_refs}."
2757                    )
2758
2759        return self
2760
2761    # TODO: use validate funcs in validate_test_tensors
2762    # def validate_inputs(self, input_tensors: Mapping[TensorId, NDArray[Any]]) -> Mapping[TensorId, NDArray[Any]]:
2763
2764    name: Annotated[
2765        Annotated[
2766            str, RestrictCharacters(string.ascii_letters + string.digits + "_+- ()")
2767        ],
2768        MinLen(5),
2769        MaxLen(128),
2770        warn(MaxLen(64), "Name longer than 64 characters.", INFO),
2771    ]
2772    """A human-readable name of this model.
2773    It should be no longer than 64 characters
2774    and may only contain letters, digits, underscore, plus, minus, parentheses and spaces.
2775    We recommend choosing a name that refers to the model's task and image modality.
2776    """
2777
2778    outputs: NotEmpty[Sequence[OutputTensorDescr]]
2779    """Describes the output tensors."""
2780
2781    @field_validator("outputs", mode="after")
2782    @classmethod
2783    def _validate_tensor_ids(
2784        cls, outputs: Sequence[OutputTensorDescr], info: ValidationInfo
2785    ) -> Sequence[OutputTensorDescr]:
2786        tensor_ids = [
2787            t.id for t in info.data.get("inputs", []) + info.data.get("outputs", [])
2788        ]
2789        duplicate_tensor_ids: List[str] = []
2790        seen: Set[str] = set()
2791        for t in tensor_ids:
2792            if t in seen:
2793                duplicate_tensor_ids.append(t)
2794
2795            seen.add(t)
2796
2797        if duplicate_tensor_ids:
2798            raise ValueError(f"Duplicate tensor ids: {duplicate_tensor_ids}")
2799
2800        return outputs
2801
2802    @staticmethod
2803    def _get_axes_with_parameterized_size(
2804        io: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
2805    ):
2806        return {
2807            f"{t.id}.{a.id}": (t, a, a.size)
2808            for t in io
2809            for a in t.axes
2810            if not isinstance(a, BatchAxis) and isinstance(a.size, ParameterizedSize)
2811        }
2812
2813    @staticmethod
2814    def _get_axes_with_independent_size(
2815        io: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
2816    ):
2817        return {
2818            (t.id, a.id): (t, a, a.size)
2819            for t in io
2820            for a in t.axes
2821            if not isinstance(a, BatchAxis)
2822            and isinstance(a.size, (int, ParameterizedSize))
2823        }
2824
2825    @field_validator("outputs", mode="after")
2826    @classmethod
2827    def _validate_output_axes(
2828        cls, outputs: List[OutputTensorDescr], info: ValidationInfo
2829    ) -> List[OutputTensorDescr]:
2830        input_size_refs = cls._get_axes_with_independent_size(
2831            info.data.get("inputs", [])
2832        )
2833        output_size_refs = cls._get_axes_with_independent_size(outputs)
2834
2835        for i, out in enumerate(outputs):
2836            valid_independent_refs: Dict[
2837                Tuple[TensorId, AxisId],
2838                Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
2839            ] = {
2840                **{
2841                    (out.id, a.id): (out, a, a.size)
2842                    for a in out.axes
2843                    if not isinstance(a, BatchAxis)
2844                    and isinstance(a.size, (int, ParameterizedSize))
2845                },
2846                **input_size_refs,
2847                **output_size_refs,
2848            }
2849            for a, ax in enumerate(out.axes):
2850                cls._validate_axis(
2851                    "outputs",
2852                    i,
2853                    out.id,
2854                    a,
2855                    ax,
2856                    valid_independent_refs=valid_independent_refs,
2857                )
2858
2859        return outputs
2860
2861    packaged_by: List[Author] = Field(default_factory=list)
2862    """The persons that have packaged and uploaded this model.
2863    Only required if those persons differ from the `authors`."""
2864
2865    parent: Optional[LinkedModel] = None
2866    """The model from which this model is derived, e.g. by fine-tuning the weights."""
2867
2868    @model_validator(mode="after")
2869    def _validate_parent_is_not_self(self) -> Self:
2870        if self.parent is not None and self.parent.id == self.id:
2871            raise ValueError("A model description may not reference itself as parent.")
2872
2873        return self
2874
2875    run_mode: Annotated[
2876        Optional[RunMode],
2877        warn(None, "Run mode '{value}' has limited support across consumer softwares."),
2878    ] = None
2879    """Custom run mode for this model: for more complex prediction procedures like test time
2880    data augmentation that currently cannot be expressed in the specification.
2881    No standard run modes are defined yet."""
2882
2883    timestamp: Datetime = Field(default_factory=Datetime.now)
2884    Timestamp in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format
2885    with a few restrictions listed [here](https://docs.python.org/3/library/datetime.html#datetime.datetime.fromisoformat).
2886    (In Python a datetime object is valid, too)."""
2887
2888    training_data: Annotated[
2889        Union[None, LinkedDataset, DatasetDescr, DatasetDescr02],
2890        Field(union_mode="left_to_right"),
2891    ] = None
2892    """The dataset used to train this model"""
2893
2894    weights: Annotated[WeightsDescr, WrapSerializer(package_weights)]
2895    """The weights for this model.
2896    Weights can be given for different formats, but should otherwise be equivalent.
2897    The available weight formats determine which consumers can use this model."""
2898
2899    config: Config = Field(default_factory=Config)
2900
2901    @model_validator(mode="after")
2902    def _add_default_cover(self) -> Self:
2903        if not get_validation_context().perform_io_checks or self.covers:
2904            return self
2905
2906        try:
2907            generated_covers = generate_covers(
2908                [(t, load_array(t.test_tensor.download().path)) for t in self.inputs],
2909                [(t, load_array(t.test_tensor.download().path)) for t in self.outputs],
2910            )
2911        except Exception as e:
2912            issue_warning(
2913                "Failed to generate cover image(s): {e}",
2914                value=self.covers,
2915                msg_context=dict(e=e),
2916                field="covers",
2917            )
2918        else:
2919            self.covers.extend(generated_covers)
2920
2921        return self
2922
2923    def get_input_test_arrays(self) -> List[NDArray[Any]]:
2924        data = [load_array(ipt.test_tensor.download().path) for ipt in self.inputs]
2925        assert all(isinstance(d, np.ndarray) for d in data)
2926        return data
2927
2928    def get_output_test_arrays(self) -> List[NDArray[Any]]:
2929        data = [load_array(out.test_tensor.download().path) for out in self.outputs]
2930        assert all(isinstance(d, np.ndarray) for d in data)
2931        return data
2932
2933    @staticmethod
2934    def get_batch_size(tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> int:
2935        batch_size = 1
2936        tensor_with_batchsize: Optional[TensorId] = None
2937        for tid in tensor_sizes:
2938            for aid, s in tensor_sizes[tid].items():
2939                if aid != BATCH_AXIS_ID or s == 1 or s == batch_size:
2940                    continue
2941
2942                if batch_size != 1:
2943                    assert tensor_with_batchsize is not None
2944                    raise ValueError(
2945                        f"batch size mismatch for tensors '{tensor_with_batchsize}' ({batch_size}) and '{tid}' ({s})"
2946                    )
2947
2948                batch_size = s
2949                tensor_with_batchsize = tid
2950
2951        return batch_size
2952
2953    def get_output_tensor_sizes(
2954        self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]
2955    ) -> Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]:
2956        """Returns the tensor output sizes for given **input_sizes**.
2957        Only if **input_sizes** has a valid input shape, the tensor output size is exact.
2958        Otherwise it might be larger than the actual (valid) output"""
2959        batch_size = self.get_batch_size(input_sizes)
2960        ns = self.get_ns(input_sizes)
2961
2962        tensor_sizes = self.get_tensor_sizes(ns, batch_size=batch_size)
2963        return tensor_sizes.outputs
2964
2965    def get_ns(self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]):
2966        """get parameter `n` for each parameterized axis
2967        such that the valid input size is >= the given input size"""
2968        ret: Dict[Tuple[TensorId, AxisId], ParameterizedSize_N] = {}
2969        axes = {t.id: {a.id: a for a in t.axes} for t in self.inputs}
2970        for tid in input_sizes:
2971            for aid, s in input_sizes[tid].items():
2972                size_descr = axes[tid][aid].size
2973                if isinstance(size_descr, ParameterizedSize):
2974                    ret[(tid, aid)] = size_descr.get_n(s)
2975                elif size_descr is None or isinstance(size_descr, (int, SizeReference)):
2976                    pass
2977                else:
2978                    assert_never(size_descr)
2979
2980        return ret
2981
2982    def get_tensor_sizes(
2983        self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
2984    ) -> _TensorSizes:
2985        axis_sizes = self.get_axis_sizes(ns, batch_size=batch_size)
2986        return _TensorSizes(
2987            {
2988                t: {
2989                    aa: axis_sizes.inputs[(tt, aa)]
2990                    for tt, aa in axis_sizes.inputs
2991                    if tt == t
2992                }
2993                for t in {tt for tt, _ in axis_sizes.inputs}
2994            },
2995            {
2996                t: {
2997                    aa: axis_sizes.outputs[(tt, aa)]
2998                    for tt, aa in axis_sizes.outputs
2999                    if tt == t
3000                }
3001                for t in {tt for tt, _ in axis_sizes.outputs}
3002            },
3003        )
3004
3005    def get_axis_sizes(
3006        self,
3007        ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N],
3008        batch_size: Optional[int] = None,
3009        *,
3010        max_input_shape: Optional[Mapping[Tuple[TensorId, AxisId], int]] = None,
3011    ) -> _AxisSizes:
3012        """Determine input and output block shape for scale factors **ns**
3013        of parameterized input sizes.
3014
3015        Args:
3016            ns: Scale factor `n` for each axis (keyed by (tensor_id, axis_id))
3017                that is parameterized as `size = min + n * step`.
3018            batch_size: The desired size of the batch dimension.
3019                If given **batch_size** overwrites any batch size present in
3020                **max_input_shape**. Default 1.
3021            max_input_shape: Limits the derived block shapes.
3022                Each axis for which the input size, parameterized by `n`, is larger
3023                than **max_input_shape** is set to the minimal value `n_min` for which
3024                this is still true.
3025                Use this for small input samples or large values of **ns**.
3026                Or simply whenever you know the full input shape.
3027
3028        Returns:
3029            Resolved axis sizes for model inputs and outputs.
3030        """
3031        max_input_shape = max_input_shape or {}
3032        if batch_size is None:
3033            for (_t_id, a_id), s in max_input_shape.items():
3034                if a_id == BATCH_AXIS_ID:
3035                    batch_size = s
3036                    break
3037            else:
3038                batch_size = 1
3039
3040        all_axes = {
3041            t.id: {a.id: a for a in t.axes} for t in chain(self.inputs, self.outputs)
3042        }
3043
3044        inputs: Dict[Tuple[TensorId, AxisId], int] = {}
3045        outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]] = {}
3046
3047        def get_axis_size(a: Union[InputAxis, OutputAxis]):
3048            if isinstance(a, BatchAxis):
3049                if (t_descr.id, a.id) in ns:
3050                    logger.warning(
3051                        "Ignoring unexpected size increment factor (n) for batch axis"
3052                        + " of tensor '{}'.",
3053                        t_descr.id,
3054                    )
3055                return batch_size
3056            elif isinstance(a.size, int):
3057                if (t_descr.id, a.id) in ns:
3058                    logger.warning(
3059                        "Ignoring unexpected size increment factor (n) for fixed size"
3060                        + " axis '{}' of tensor '{}'.",
3061                        a.id,
3062                        t_descr.id,
3063                    )
3064                return a.size
3065            elif isinstance(a.size, ParameterizedSize):
3066                if (t_descr.id, a.id) not in ns:
3067                    raise ValueError(
3068                        "Size increment factor (n) missing for parametrized axis"
3069                        + f" '{a.id}' of tensor '{t_descr.id}'."
3070                    )
3071                n = ns[(t_descr.id, a.id)]
3072                s_max = max_input_shape.get((t_descr.id, a.id))
3073                if s_max is not None:
3074                    n = min(n, a.size.get_n(s_max))
3075
3076                return a.size.get_size(n)
3077
3078            elif isinstance(a.size, SizeReference):
3079                if (t_descr.id, a.id) in ns:
3080                    logger.warning(
3081                        "Ignoring unexpected size increment factor (n) for axis '{}'"
3082                        + " of tensor '{}' with size reference.",
3083                        a.id,
3084                        t_descr.id,
3085                    )
3086                assert not isinstance(a, BatchAxis)
3087                ref_axis = all_axes[a.size.tensor_id][a.size.axis_id]
3088                assert not isinstance(ref_axis, BatchAxis)
3089                ref_key = (a.size.tensor_id, a.size.axis_id)
3090                ref_size = inputs.get(ref_key, outputs.get(ref_key))
3091                assert ref_size is not None, ref_key
3092                assert not isinstance(ref_size, _DataDepSize), ref_key
3093                return a.size.get_size(
3094                    axis=a,
3095                    ref_axis=ref_axis,
3096                    ref_size=ref_size,
3097                )
3098            elif isinstance(a.size, DataDependentSize):
3099                if (t_descr.id, a.id) in ns:
3100                    logger.warning(
3101                        "Ignoring unexpected increment factor (n) for data dependent"
3102                        + " size axis '{}' of tensor '{}'.",
3103                        a.id,
3104                        t_descr.id,
3105                    )
3106                return _DataDepSize(a.size.min, a.size.max)
3107            else:
3108                assert_never(a.size)
3109
3110        # first resolve all input axis sizes except the `SizeReference` ones
3111        for t_descr in self.inputs:
3112            for a in t_descr.axes:
3113                if not isinstance(a.size, SizeReference):
3114                    s = get_axis_size(a)
3115                    assert not isinstance(s, _DataDepSize)
3116                    inputs[t_descr.id, a.id] = s
3117
3118        # resolve all other input axis sizes
3119        for t_descr in self.inputs:
3120            for a in t_descr.axes:
3121                if isinstance(a.size, SizeReference):
3122                    s = get_axis_size(a)
3123                    assert not isinstance(s, _DataDepSize)
3124                    inputs[t_descr.id, a.id] = s
3125
3126        # resolve all output axis sizes
3127        for t_descr in self.outputs:
3128            for a in t_descr.axes:
3129                assert not isinstance(a.size, ParameterizedSize)
3130                s = get_axis_size(a)
3131                outputs[t_descr.id, a.id] = s
3132
3133        return _AxisSizes(inputs=inputs, outputs=outputs)
3134
3135    @model_validator(mode="before")
3136    @classmethod
3137    def _convert(cls, data: Dict[str, Any]) -> Dict[str, Any]:
3138        cls.convert_from_old_format_wo_validation(data)
3139        return data
3140
3141    @classmethod
3142    def convert_from_old_format_wo_validation(cls, data: Dict[str, Any]) -> None:
3143        """Convert metadata following an older format version to this class's format
3144        without validating the result.
3145        """
3146        if (
3147            data.get("type") == "model"
3148            and isinstance(fv := data.get("format_version"), str)
3149            and fv.count(".") == 2
3150        ):
3151            fv_parts = fv.split(".")
3152            if any(not p.isdigit() for p in fv_parts):
3153                return
3154
3155            fv_tuple = tuple(map(int, fv_parts))
3156
3157            assert cls.implemented_format_version_tuple[0:2] == (0, 5)
3158            if fv_tuple[:2] in ((0, 3), (0, 4)):
3159                m04 = _ModelDescr_v0_4.load(data)
3160                if isinstance(m04, InvalidDescr):
3161                    try:
3162                        updated = _model_conv.convert_as_dict(
3163                            m04  # pyright: ignore[reportArgumentType]
3164                        )
3165                    except Exception as e:
3166                        logger.error(
3167                            "Failed to convert from invalid model 0.4 description."
3168                            + f"\nerror: {e}"
3169                            + "\nProceeding with model 0.5 validation without conversion."
3170                        )
3171                        updated = None
3172                else:
3173                    updated = _model_conv.convert_as_dict(m04)
3174
3175                if updated is not None:
3176                    data.clear()
3177                    data.update(updated)
3178
3179            elif fv_tuple[:2] == (0, 5):
3180                # bump patch version
3181                data["format_version"] = cls.implemented_format_version

Specification of the fields used in a bioimage.io-compliant RDF to describe AI models with pretrained weights. These fields are typically stored in a YAML file which we call a model resource description file (model RDF).
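
As a rough orientation, here is a minimal loading sketch; the id "affable-shark" is only an illustrative placeholder, and any model id, URL or local rdf.yaml/bioimageio.yaml path can serve as source:

    from bioimageio.spec import load_model_description
    from bioimageio.spec.model.v0_5 import ModelDescr

    model = load_model_description("affable-shark")  # placeholder id/URL/path
    if isinstance(model, ModelDescr):  # i.e. a format 0.5 model description
        print(model.name)
        print([t.id for t in model.inputs], [t.id for t in model.outputs])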

implemented_format_version: ClassVar[Literal['0.5.4']] = '0.5.4'
implemented_type: ClassVar[Literal['model']] = 'model'

bioimage.io-wide unique resource identifier assigned by bioimage.io; version unspecific.

authors: Annotated[List[bioimageio.spec.generic.v0_3.Author], MinLen(min_length=1)]

The authors are the creators of the model RDF and the primary points of contact.

documentation: Annotated[Union[Annotated[pathlib.Path, PathType(path_type='file'), Predicate(is_absolute), FieldInfo(annotation=NoneType, required=True, title='AbsoluteFilePath')], bioimageio.spec._internal.io.RelativeFilePath, bioimageio.spec._internal.url.HttpUrl], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function _validate_md_suffix at 0x7f26013f3e20>), PlainSerializer(func=<function _package at 0x7f2602535e40>, return_type=PydanticUndefined, when_used='unless-none'), FieldInfo(annotation=NoneType, required=True, examples=['https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md', 'README.md'])]

∈📦 URL or relative path to a markdown file with additional documentation. The recommended documentation file name is README.md. An .md suffix is mandatory. The documentation should include a '#[#] Validation' (sub)section with details on how to quantitatively validate the model on unseen data.

inputs: Annotated[Sequence[bioimageio.spec.model.v0_5.InputTensorDescr], MinLen(min_length=1)]

Describes the input tensors expected by this model.

name: Annotated[str, RestrictCharacters(alphabet='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_+- ()'), MinLen(min_length=5), MaxLen(max_length=128), AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f25f223c9a0>, severity=20, msg='Name longer than 64 characters.', context={'typ': Annotated[Any, MaxLen(max_length=64)]})]

A human-readable name of this model. It should be no longer than 64 characters and may only contain letters, digits, underscore, plus, minus, parentheses and spaces. We recommend choosing a name that refers to the model's task and image modality.

outputs: Annotated[Sequence[bioimageio.spec.model.v0_5.OutputTensorDescr], MinLen(min_length=1)]

Describes the output tensors.

The persons that have packaged and uploaded this model. Only required if those persons differ from the authors.

The model from which this model is derived, e.g. by fine-tuning the weights.

run_mode: Annotated[Optional[bioimageio.spec.model.v0_4.RunMode], AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f25f223ce00>, severity=30, msg="Run mode '{value}' has limited support across consumer softwares.", context={'typ': None})]

Custom run mode for this model: for more complex prediction procedures like test time data augmentation that currently cannot be expressed in the specification. No standard run modes are defined yet.

timestamp: bioimageio.spec._internal.types.Datetime

Timestamp in ISO 8601 format with a few restrictions listed here. (In Python a datetime object is valid, too).

training_data: Annotated[Union[NoneType, bioimageio.spec.dataset.v0_3.LinkedDataset, DatasetDescr, bioimageio.spec.dataset.v0_2.DatasetDescr], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])]

The dataset used to train this model

weights: Annotated[bioimageio.spec.model.v0_5.WeightsDescr, WrapSerializer(func=<function package_weights at 0x7f25f8f8df80>, return_type=PydanticUndefined, when_used='always')]

The weights for this model. Weights can be given for different formats, but should otherwise be equivalent. The available weight formats determine which consumers can use this model.

def get_input_test_arrays(self) -> List[numpy.ndarray[Any, numpy.dtype[Any]]]:
2923    def get_input_test_arrays(self) -> List[NDArray[Any]]:
2924        data = [load_array(ipt.test_tensor.download().path) for ipt in self.inputs]
2925        assert all(isinstance(d, np.ndarray) for d in data)
2926        return data
def get_output_test_arrays(self) -> List[numpy.ndarray[Any, numpy.dtype[Any]]]:
2928    def get_output_test_arrays(self) -> List[NDArray[Any]]:
2929        data = [load_array(out.test_tensor.download().path) for out in self.outputs]
2930        assert all(isinstance(d, np.ndarray) for d in data)
2931        return data
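
Both helpers download the packaged test tensors and load them as numpy arrays. A usage sketch (placeholder model id):

    from bioimageio.spec import load_model_description

    model = load_model_description("affable-shark")  # hypothetical example id
    test_inputs = model.get_input_test_arrays()    # one numpy array per input tensor
    test_outputs = model.get_output_test_arrays()  # one numpy array per output tensor
    print([a.shape for a in test_inputs], [a.shape for a in test_outputs])
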
@staticmethod
def get_batch_size( tensor_sizes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]]) -> int:
2933    @staticmethod
2934    def get_batch_size(tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> int:
2935        batch_size = 1
2936        tensor_with_batchsize: Optional[TensorId] = None
2937        for tid in tensor_sizes:
2938            for aid, s in tensor_sizes[tid].items():
2939                if aid != BATCH_AXIS_ID or s == 1 or s == batch_size:
2940                    continue
2941
2942                if batch_size != 1:
2943                    assert tensor_with_batchsize is not None
2944                    raise ValueError(
2945                        f"batch size mismatch for tensors '{tensor_with_batchsize}' ({batch_size}) and '{tid}' ({s})"
2946                    )
2947
2948                batch_size = s
2949                tensor_with_batchsize = tid
2950
2951        return batch_size
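
A minimal, self-contained sketch of how the common batch size is resolved (the tensor and axis ids are hypothetical):

    from bioimageio.spec.model.v0_5 import AxisId, ModelDescr, TensorId

    tensor_sizes = {
        TensorId("raw"): {AxisId("batch"): 2, AxisId("y"): 512, AxisId("x"): 512},
        TensorId("mask"): {AxisId("batch"): 2, AxisId("y"): 512, AxisId("x"): 512},
    }
    assert ModelDescr.get_batch_size(tensor_sizes) == 2  # inconsistent batch sizes would raise a ValueError
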
def get_output_tensor_sizes( self, input_sizes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]]) -> Dict[bioimageio.spec.model.v0_5.TensorId, Dict[bioimageio.spec.model.v0_5.AxisId, Union[int, bioimageio.spec.model.v0_5._DataDepSize]]]:
2953    def get_output_tensor_sizes(
2954        self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]
2955    ) -> Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]:
2956        """Returns the tensor output sizes for given **input_sizes**.
2957        Only if **input_sizes** has a valid input shape, the tensor output size is exact.
2958        Otherwise it might be larger than the actual (valid) output"""
2959        batch_size = self.get_batch_size(input_sizes)
2960        ns = self.get_ns(input_sizes)
2961
2962        tensor_sizes = self.get_tensor_sizes(ns, batch_size=batch_size)
2963        return tensor_sizes.outputs

Returns the tensor output sizes for the given input_sizes. The output sizes are exact only if input_sizes describes a valid input shape; otherwise they may be larger than the actual (valid) output sizes.
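
A sketch of deriving output sizes from concrete input sizes, assuming a format 0.5 model and a placeholder model id:

    from bioimageio.spec import load_model_description
    from bioimageio.spec.model.v0_5 import BatchAxis, ParameterizedSize

    model = load_model_description("affable-shark")  # hypothetical example id
    input_sizes = {}
    for ipt in model.inputs:
        sizes = {}
        for a in ipt.axes:
            if isinstance(a, BatchAxis):
                sizes[a.id] = 1
            elif isinstance(a.size, int):
                sizes[a.id] = a.size
            elif isinstance(a.size, ParameterizedSize):
                sizes[a.id] = a.size.min  # smallest valid size
            # axes with a SizeReference or data dependent size are resolved internally
        input_sizes[ipt.id] = sizes

    print(model.get_output_tensor_sizes(input_sizes))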

def get_ns( self, input_sizes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]]):
2965    def get_ns(self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]):
2966        """get parameter `n` for each parameterized axis
2967        such that the valid input size is >= the given input size"""
2968        ret: Dict[Tuple[TensorId, AxisId], ParameterizedSize_N] = {}
2969        axes = {t.id: {a.id: a for a in t.axes} for t in self.inputs}
2970        for tid in input_sizes:
2971            for aid, s in input_sizes[tid].items():
2972                size_descr = axes[tid][aid].size
2973                if isinstance(size_descr, ParameterizedSize):
2974                    ret[(tid, aid)] = size_descr.get_n(s)
2975                elif size_descr is None or isinstance(size_descr, (int, SizeReference)):
2976                    pass
2977                else:
2978                    assert_never(size_descr)
2979
2980        return ret

get parameter n for each parameterized axis such that the valid input size is >= the given input size
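
The parameter n relates to a ParameterizedSize via size = min + n * step; a small, self-contained example (the concrete numbers are arbitrary):

    from bioimageio.spec.model.v0_5 import ParameterizedSize

    ps = ParameterizedSize(min=64, step=16)
    n = ps.get_n(100)  # smallest n such that 64 + n*16 >= 100
    assert n == 3 and ps.get_size(n) == 112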

def get_tensor_sizes( self, ns: Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int], batch_size: int) -> bioimageio.spec.model.v0_5._TensorSizes:
2982    def get_tensor_sizes(
2983        self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
2984    ) -> _TensorSizes:
2985        axis_sizes = self.get_axis_sizes(ns, batch_size=batch_size)
2986        return _TensorSizes(
2987            {
2988                t: {
2989                    aa: axis_sizes.inputs[(tt, aa)]
2990                    for tt, aa in axis_sizes.inputs
2991                    if tt == t
2992                }
2993                for t in {tt for tt, _ in axis_sizes.inputs}
2994            },
2995            {
2996                t: {
2997                    aa: axis_sizes.outputs[(tt, aa)]
2998                    for tt, aa in axis_sizes.outputs
2999                    if tt == t
3000                }
3001                for t in {tt for tt, _ in axis_sizes.outputs}
3002            },
3003        )
def get_axis_sizes( self, ns: Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int], batch_size: Optional[int] = None, *, max_input_shape: Optional[Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int]] = None) -> bioimageio.spec.model.v0_5._AxisSizes:
3005    def get_axis_sizes(
3006        self,
3007        ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N],
3008        batch_size: Optional[int] = None,
3009        *,
3010        max_input_shape: Optional[Mapping[Tuple[TensorId, AxisId], int]] = None,
3011    ) -> _AxisSizes:
3012        """Determine input and output block shape for scale factors **ns**
3013        of parameterized input sizes.
3014
3015        Args:
3016            ns: Scale factor `n` for each axis (keyed by (tensor_id, axis_id))
3017                that is parameterized as `size = min + n * step`.
3018            batch_size: The desired size of the batch dimension.
3019                If given **batch_size** overwrites any batch size present in
3020                **max_input_shape**. Default 1.
3021            max_input_shape: Limits the derived block shapes.
3022                Each axis for which the input size, parameterized by `n`, is larger
3023                than **max_input_shape** is set to the minimal value `n_min` for which
3024                this is still true.
3025                Use this for small input samples or large values of **ns**.
3026                Or simply whenever you know the full input shape.
3027
3028        Returns:
3029            Resolved axis sizes for model inputs and outputs.
3030        """
3031        max_input_shape = max_input_shape or {}
3032        if batch_size is None:
3033            for (_t_id, a_id), s in max_input_shape.items():
3034                if a_id == BATCH_AXIS_ID:
3035                    batch_size = s
3036                    break
3037            else:
3038                batch_size = 1
3039
3040        all_axes = {
3041            t.id: {a.id: a for a in t.axes} for t in chain(self.inputs, self.outputs)
3042        }
3043
3044        inputs: Dict[Tuple[TensorId, AxisId], int] = {}
3045        outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]] = {}
3046
3047        def get_axis_size(a: Union[InputAxis, OutputAxis]):
3048            if isinstance(a, BatchAxis):
3049                if (t_descr.id, a.id) in ns:
3050                    logger.warning(
3051                        "Ignoring unexpected size increment factor (n) for batch axis"
3052                        + " of tensor '{}'.",
3053                        t_descr.id,
3054                    )
3055                return batch_size
3056            elif isinstance(a.size, int):
3057                if (t_descr.id, a.id) in ns:
3058                    logger.warning(
3059                        "Ignoring unexpected size increment factor (n) for fixed size"
3060                        + " axis '{}' of tensor '{}'.",
3061                        a.id,
3062                        t_descr.id,
3063                    )
3064                return a.size
3065            elif isinstance(a.size, ParameterizedSize):
3066                if (t_descr.id, a.id) not in ns:
3067                    raise ValueError(
3068                        "Size increment factor (n) missing for parametrized axis"
3069                        + f" '{a.id}' of tensor '{t_descr.id}'."
3070                    )
3071                n = ns[(t_descr.id, a.id)]
3072                s_max = max_input_shape.get((t_descr.id, a.id))
3073                if s_max is not None:
3074                    n = min(n, a.size.get_n(s_max))
3075
3076                return a.size.get_size(n)
3077
3078            elif isinstance(a.size, SizeReference):
3079                if (t_descr.id, a.id) in ns:
3080                    logger.warning(
3081                        "Ignoring unexpected size increment factor (n) for axis '{}'"
3082                        + " of tensor '{}' with size reference.",
3083                        a.id,
3084                        t_descr.id,
3085                    )
3086                assert not isinstance(a, BatchAxis)
3087                ref_axis = all_axes[a.size.tensor_id][a.size.axis_id]
3088                assert not isinstance(ref_axis, BatchAxis)
3089                ref_key = (a.size.tensor_id, a.size.axis_id)
3090                ref_size = inputs.get(ref_key, outputs.get(ref_key))
3091                assert ref_size is not None, ref_key
3092                assert not isinstance(ref_size, _DataDepSize), ref_key
3093                return a.size.get_size(
3094                    axis=a,
3095                    ref_axis=ref_axis,
3096                    ref_size=ref_size,
3097                )
3098            elif isinstance(a.size, DataDependentSize):
3099                if (t_descr.id, a.id) in ns:
3100                    logger.warning(
3101                        "Ignoring unexpected increment factor (n) for data dependent"
3102                        + " size axis '{}' of tensor '{}'.",
3103                        a.id,
3104                        t_descr.id,
3105                    )
3106                return _DataDepSize(a.size.min, a.size.max)
3107            else:
3108                assert_never(a.size)
3109
3110        # first resolve all input axis sizes except the `SizeReference` ones
3111        for t_descr in self.inputs:
3112            for a in t_descr.axes:
3113                if not isinstance(a.size, SizeReference):
3114                    s = get_axis_size(a)
3115                    assert not isinstance(s, _DataDepSize)
3116                    inputs[t_descr.id, a.id] = s
3117
3118        # resolve all other input axis sizes
3119        for t_descr in self.inputs:
3120            for a in t_descr.axes:
3121                if isinstance(a.size, SizeReference):
3122                    s = get_axis_size(a)
3123                    assert not isinstance(s, _DataDepSize)
3124                    inputs[t_descr.id, a.id] = s
3125
3126        # resolve all output axis sizes
3127        for t_descr in self.outputs:
3128            for a in t_descr.axes:
3129                assert not isinstance(a.size, ParameterizedSize)
3130                s = get_axis_size(a)
3131                outputs[t_descr.id, a.id] = s
3132
3133        return _AxisSizes(inputs=inputs, outputs=outputs)

Determine input and output block shape for scale factors ns of parameterized input sizes.

Arguments:
  • ns: Scale factor n for each axis (keyed by (tensor_id, axis_id)) that is parameterized as size = min + n * step.
  • batch_size: The desired size of the batch dimension. If given batch_size overwrites any batch size present in max_input_shape. Default 1.
  • max_input_shape: Limits the derived block shapes. Each axis for which the input size, parameterized by n, is larger than max_input_shape is set to the minimal value n_min for which this is still true. Use this for small input samples or large values of ns. Or simply whenever you know the full input shape.
Returns:

Resolved axis sizes for model inputs and outputs.
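
A sketch of resolving concrete block shapes for a common increment n over all parameterized input axes (placeholder model id; assumes a format 0.5 model):

    from bioimageio.spec import load_model_description
    from bioimageio.spec.model.v0_5 import BatchAxis, ParameterizedSize

    model = load_model_description("affable-shark")  # hypothetical example id
    ns = {
        (t.id, a.id): 2
        for t in model.inputs
        for a in t.axes
        if not isinstance(a, BatchAxis) and isinstance(a.size, ParameterizedSize)
    }
    axis_sizes = model.get_axis_sizes(ns, batch_size=1)
    print(axis_sizes.inputs)   # {(tensor_id, axis_id): int}
    print(axis_sizes.outputs)  # may contain _DataDepSize entries for data dependent axes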

@classmethod
def convert_from_old_format_wo_validation(cls, data: Dict[str, Any]) -> None:
3141    @classmethod
3142    def convert_from_old_format_wo_validation(cls, data: Dict[str, Any]) -> None:
3143        """Convert metadata following an older format version to this class's format
3144        without validating the result.
3145        """
3146        if (
3147            data.get("type") == "model"
3148            and isinstance(fv := data.get("format_version"), str)
3149            and fv.count(".") == 2
3150        ):
3151            fv_parts = fv.split(".")
3152            if any(not p.isdigit() for p in fv_parts):
3153                return
3154
3155            fv_tuple = tuple(map(int, fv_parts))
3156
3157            assert cls.implemented_format_version_tuple[0:2] == (0, 5)
3158            if fv_tuple[:2] in ((0, 3), (0, 4)):
3159                m04 = _ModelDescr_v0_4.load(data)
3160                if isinstance(m04, InvalidDescr):
3161                    try:
3162                        updated = _model_conv.convert_as_dict(
3163                            m04  # pyright: ignore[reportArgumentType]
3164                        )
3165                    except Exception as e:
3166                        logger.error(
3167                            "Failed to convert from invalid model 0.4 description."
3168                            + f"\nerror: {e}"
3169                            + "\nProceeding with model 0.5 validation without conversion."
3170                        )
3171                        updated = None
3172                else:
3173                    updated = _model_conv.convert_as_dict(m04)
3174
3175                if updated is not None:
3176                    data.clear()
3177                    data.update(updated)
3178
3179            elif fv_tuple[:2] == (0, 5):
3180                # bump patch version
3181                data["format_version"] = cls.implemented_format_version

Convert metadata following an older format version to this class's format without validating the result.
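
For instance, a 0.5.0 model dict only gets its patch version bumped in place, while 0.3/0.4 dicts are converted via the model 0.4 description class (sketch; the dict is deliberately abbreviated):

    from bioimageio.spec.model.v0_5 import ModelDescr

    data = {"type": "model", "format_version": "0.5.0"}
    ModelDescr.convert_from_old_format_wo_validation(data)
    assert data["format_version"] == "0.5.4"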

implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = (0, 5, 4)
def model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None:
124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
125                        """We need to both initialize private attributes and call the user-defined model_post_init
126                        method.
127                        """
128                        init_private_attributes(self, context)
129                        original_model_post_init(self, context)

We need to both initialize private attributes and call the user-defined model_post_init method.

class NotebookDescr(bioimageio.spec.generic.v0_3.GenericDescrBase):
32class NotebookDescr(GenericDescrBase):
33    """Bioimage.io description of a Jupyter notebook."""
34
35    implemented_type: ClassVar[Literal["notebook"]] = "notebook"
36    if TYPE_CHECKING:
37        type: Literal["notebook"] = "notebook"
38    else:
39        type: Literal["notebook"]
40
41    id: Optional[NotebookId] = None
42    """bioimage.io-wide unique resource identifier
43    assigned by bioimage.io; version **un**specific."""
44
45    parent: Optional[NotebookId] = None
46    """The description from which this one is derived"""
47
48    source: NotebookSource
49    """The Jupyter notebook"""

Bioimage.io description of a Jupyter notebook.

implemented_type: ClassVar[Literal['notebook']] = 'notebook'
id: Optional[bioimageio.spec.notebook.v0_3.NotebookId]

bioimage.io-wide unique resource identifier assigned by bioimage.io; version unspecific.

parent: Optional[bioimageio.spec.notebook.v0_3.NotebookId]

The description from which this one is derived

source: Union[Annotated[bioimageio.spec._internal.url.HttpUrl, WithSuffix(suffix='.ipynb', case_sensitive=True)], Annotated[pathlib.Path, PathType(path_type='file'), Predicate(is_absolute), FieldInfo(annotation=NoneType, required=True, title='AbsoluteFilePath'), WithSuffix(suffix='.ipynb', case_sensitive=True)], Annotated[bioimageio.spec._internal.io.RelativeFilePath, WithSuffix(suffix='.ipynb', case_sensitive=True)]]

The Jupyter notebook

implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = (0, 3, 0)
def model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None:
124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
125                        """We need to both initialize private attributes and call the user-defined model_post_init
126                        method.
127                        """
128                        init_private_attributes(self, context)
129                        original_model_post_init(self, context)

We need to both initialize private attributes and call the user-defined model_post_init method.

ResourceDescr = typing.Union[typing.Annotated[typing.Union[typing.Annotated[typing.Union[typing.Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], typing.Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], typing.Annotated[typing.Union[typing.Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], typing.Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], typing.Annotated[typing.Union[typing.Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], typing.Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], typing.Annotated[typing.Union[typing.Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], typing.Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[typing.Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], typing.Annotated[GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')]]
def save_bioimageio_package_as_folder( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], zipfile.ZipFile, Dict[str, YamlValue], Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')]], /, *, output_path: Union[Annotated[pathlib.Path, PathType(path_type='new')], Annotated[pathlib.Path, PathType(path_type='dir')], NoneType] = None, weights_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None) -> Annotated[pathlib.Path, PathType(path_type='dir')]:
120def save_bioimageio_package_as_folder(
121    source: Union[BioimageioYamlSource, ResourceDescr],
122    /,
123    *,
124    output_path: Union[NewPath, DirectoryPath, None] = None,
125    weights_priority_order: Optional[  # model only
126        Sequence[
127            Literal[
128                "keras_hdf5",
129                "onnx",
130                "pytorch_state_dict",
131                "tensorflow_js",
132                "tensorflow_saved_model_bundle",
133                "torchscript",
134            ]
135        ]
136    ] = None,
137) -> DirectoryPath:
138    """Write the content of a bioimage.io resource package to a folder.
139
140    Args:
141        source: bioimageio resource description
142        output_path: file path to write package to
143        weights_priority_order: If given only the first weights format present in the model is included.
144                                If none of the prioritized weights formats is found all are included.
145
146    Returns:
147        directory path to bioimageio package folder
148    """
149    package_content = _prepare_resource_package(
150        source,
151        weights_priority_order=weights_priority_order,
152    )
153    if output_path is None:
154        output_path = Path(mkdtemp())
155    else:
156        output_path = Path(output_path)
157
158    output_path.mkdir(exist_ok=True, parents=True)
159    for name, src in package_content.items():
160        if isinstance(src, collections.abc.Mapping):
161            write_yaml(src, output_path / name)
162        elif isinstance(src, ZipPath):
163            extracted = Path(src.root.extract(src.name, output_path))
164            if extracted.name != src.name:
165                try:
166                    shutil.move(str(extracted), output_path / src.name)
167                except Exception as e:
168                    raise RuntimeError(
169                        f"Failed to rename extracted file '{extracted.name}'"
170                        + f" to '{src.name}'."
171                        + f" (extracted from '{src.name}' in '{src.root.filename}')"
172                    ) from e
173        else:
174            try:
175                shutil.copy(src, output_path / name)
176            except shutil.SameFileError:
177                pass
178
179    return output_path

Write the content of a bioimage.io resource package to a folder.

Arguments:
  • source: bioimageio resource description
  • output_path: file path to write package to
  • weights_priority_order: If given, only the first weights format present in the model is included. If none of the prioritized weights formats is found, all are included.
Returns:

directory path to bioimageio package folder
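
Usage sketch (the source id is a placeholder; any supported source or an already loaded description works):

    from pathlib import Path

    from bioimageio.spec import save_bioimageio_package_as_folder

    folder = save_bioimageio_package_as_folder(
        "affable-shark",                     # hypothetical id/URL/path/description
        output_path=Path("unpacked_model"),  # created if it does not yet exist
    )
    print(sorted(p.name for p in folder.iterdir()))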

def save_bioimageio_package_to_stream( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], zipfile.ZipFile, Dict[str, YamlValue], Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')]], /, *, compression: int = 8, compression_level: int = 1, output_stream: Optional[IO[bytes]] = None, weights_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None) -> IO[bytes]:
243def save_bioimageio_package_to_stream(
244    source: Union[BioimageioYamlSource, ResourceDescr],
245    /,
246    *,
247    compression: int = ZIP_DEFLATED,
248    compression_level: int = 1,
249    output_stream: Union[IO[bytes], None] = None,
250    weights_priority_order: Optional[  # model only
251        Sequence[
252            Literal[
253                "keras_hdf5",
254                "onnx",
255                "pytorch_state_dict",
256                "tensorflow_js",
257                "tensorflow_saved_model_bundle",
258                "torchscript",
259            ]
260        ]
261    ] = None,
262) -> IO[bytes]:
263    """Package a bioimageio resource into a stream.
264
265    Args:
266        source: bioimageio resource description
267        compression: The numeric constant of compression method.
268        compression_level: Compression level to use when writing files to the archive.
269                           See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile
270        output_stream: stream to write package to
271        weights_priority_order: If given only the first weights format present in the model is included.
272                                If none of the prioritized weights formats is found all are included.
273
274    Note: this function bypasses safety checks and does not load/validate the model after writing.
275
276    Returns:
277        stream of zipped bioimageio package
278    """
279    if output_stream is None:
280        output_stream = BytesIO()
281
282    package_content = _prepare_resource_package(
283        source,
284        weights_priority_order=weights_priority_order,
285    )
286
287    write_zip(
288        output_stream,
289        package_content,
290        compression=compression,
291        compression_level=compression_level,
292    )
293
294    return output_stream

Package a bioimageio resource into a stream.

Arguments:
  • source: bioimageio resource description
  • compression: The numeric constant of compression method.
  • compression_level: Compression level to use when writing files to the archive. See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile
  • output_stream: stream to write package to
  • weights_priority_order: If given, only the first weights format present in the model is included. If none of the prioritized weights formats is found, all are included.

Note: this function bypasses safety checks and does not load/validate the model after writing.

Returns:

stream of zipped bioimageio package
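
Usage sketch (placeholder source id):

    from bioimageio.spec import save_bioimageio_package_to_stream

    stream = save_bioimageio_package_to_stream("affable-shark")  # hypothetical id/URL/path
    zipped = stream.getvalue()  # a BytesIO is created when no output_stream is given
    print(len(zipped))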

def save_bioimageio_package( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], zipfile.ZipFile, Dict[str, YamlValue], Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')]], /, *, compression: int = 8, compression_level: int = 1, output_path: Union[Annotated[pathlib.Path, PathType(path_type='new')], Annotated[pathlib.Path, PathType(path_type='file')], NoneType] = None, weights_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None) -> Annotated[pathlib.Path, PathType(path_type='file')]:
182def save_bioimageio_package(
183    source: Union[BioimageioYamlSource, ResourceDescr],
184    /,
185    *,
186    compression: int = ZIP_DEFLATED,
187    compression_level: int = 1,
188    output_path: Union[NewPath, FilePath, None] = None,
189    weights_priority_order: Optional[  # model only
190        Sequence[
191            Literal[
192                "keras_hdf5",
193                "onnx",
194                "pytorch_state_dict",
195                "tensorflow_js",
196                "tensorflow_saved_model_bundle",
197                "torchscript",
198            ]
199        ]
200    ] = None,
201) -> FilePath:
202    """Package a bioimageio resource as a zip file.
203
204    Args:
205        source: bioimageio resource description
206        compression: The numeric constant of the compression method.
207        compression_level: Compression level to use when writing files to the archive.
208                           See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile
209        output_path: file path to write package to
210        weights_priority_order: If given, only the first weights format present in the model is included.
211                                If none of the prioritized weights formats is found, all are included.
212
213    Returns:
214        path to zipped bioimageio package
215    """
216    package_content = _prepare_resource_package(
217        source,
218        weights_priority_order=weights_priority_order,
219    )
220    if output_path is None:
221        output_path = Path(
222            NamedTemporaryFile(suffix=".bioimageio.zip", delete=False).name
223        )
224    else:
225        output_path = Path(output_path)
226
227    write_zip(
228        output_path,
229        package_content,
230        compression=compression,
231        compression_level=compression_level,
232    )
233    with get_validation_context().replace(warning_level=ERROR):
234        if isinstance((exported := load_description(output_path)), InvalidDescr):
235            raise ValueError(
236                f"Exported package '{output_path}' is invalid:"
237                + f" {exported.validation_summary}"
238            )
239
240    return output_path

Package a bioimageio resource as a zip file.

Arguments:
  • source: bioimageio resource description
  • compression: The numeric constant of the compression method.
  • compression_level: Compression level to use when writing files to the archive. See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile
  • output_path: file path to write package to
  • weights_priority_order: If given, only the first weights format present in the model is included. If none of the prioritized weights formats is found, all are included.
Returns:

path to zipped bioimageio package
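
A minimal usage sketch; the file paths are placeholders, not part of the API:

    from pathlib import Path
    from bioimageio.spec import save_bioimageio_package

    package_path = save_bioimageio_package(
        "my_model/rdf.yaml",                            # placeholder description source
        output_path=Path("my_model.bioimageio.zip"),
        weights_priority_order=["onnx", "pytorch_state_dict"],  # model only
    )
    print(package_path)  # path of the written zip; the package is re-validated before returning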

def save_bioimageio_yaml_only( rd: Union[Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')], Dict[str, YamlValue], InvalidDescr], /, file: Union[Annotated[pathlib.Path, PathType(path_type='new')], Annotated[pathlib.Path, PathType(path_type='file')], TextIO], *, exclude_unset: bool = True, exclude_defaults: bool = False):
199def save_bioimageio_yaml_only(
200    rd: Union[ResourceDescr, BioimageioYamlContent, InvalidDescr],
201    /,
202    file: Union[NewPath, FilePath, TextIO],
203    *,
204    exclude_unset: bool = True,
205    exclude_defaults: bool = False,
206):
207    """write the metadata of a resource description (`rd`) to `file`
208    without writing any of the referenced files in it.
209
210    Args:
211        rd: bioimageio resource description
212        file: file or stream to save to
213        exclude_unset: Exclude fields that have not explicitly been set.
214        exclude_defaults: Exclude fields that have the default value (even if set explicitly).
215
216    Note: To save a resource description with its associated files as a package,
217    use `save_bioimageio_package` or `save_bioimageio_package_as_folder`.
218    """
219    if isinstance(rd, ResourceDescrBase):
220        content = dump_description(
221            rd, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults
222        )
223    else:
224        content = rd
225
226    write_yaml(cast(YamlValue, content), file)

write the metadata of a resource description (rd) to file without writing any of the referenced files in it.

Arguments:
  • rd: bioimageio resource description
  • file: file or stream to save to
  • exclude_unset: Exclude fields that have not explicitly been set.
  • exclude_defaults: Exclude fields that have the default value (even if set explicitly).

Note: To save a resource description with its associated files as a package, use save_bioimageio_package or save_bioimageio_package_as_folder.
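
A short sketch (placeholder paths): dump only the metadata of a loaded description.

    from pathlib import Path
    from bioimageio.spec import load_description, save_bioimageio_yaml_only

    descr = load_description("my_model.bioimageio.zip")      # placeholder source
    save_bioimageio_yaml_only(descr, file=Path("rdf.yaml"))   # writes the YAML only, no referenced files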

settings = Settings(cache_path=PosixPath('/home/runner/.cache/bioimageio'), collection_http_pattern='https://hypha.aicell.io/bioimage-io/artifacts/{bioimageio_id}/files/rdf.yaml', id_map='https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/id_map.json', id_map_draft='https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/id_map_draft.json', resolve_draft=True, perform_io_checks=True, log_warnings=True, github_username=None, github_token=None, CI='true', user_agent=None)
SpecificResourceDescr = typing.Annotated[typing.Union[typing.Annotated[typing.Union[typing.Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], typing.Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], typing.Annotated[typing.Union[typing.Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], typing.Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], typing.Annotated[typing.Union[typing.Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], typing.Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], typing.Annotated[typing.Union[typing.Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], typing.Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)]
def update_format( source: Union[Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')], Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], zipfile.ZipFile, Dict[str, YamlValue], InvalidDescr], /, *, output: Union[pathlib.Path, TextIO, NoneType] = None, exclude_defaults: bool = True, perform_io_checks: Optional[bool] = None) -> Union[Annotated[Union[ApplicationDescr, DatasetDescr, ModelDescr, NotebookDescr], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], GenericDescr, InvalidDescr]:
255def update_format(
256    source: Union[
257        ResourceDescr,
258        PermissiveFileSource,
259        ZipFile,
260        BioimageioYamlContent,
261        InvalidDescr,
262    ],
263    /,
264    *,
265    output: Union[Path, TextIO, None] = None,
266    exclude_defaults: bool = True,
267    perform_io_checks: Optional[bool] = None,
268) -> Union[LatestResourceDescr, InvalidDescr]:
269    """Update a resource description.
270
271    Notes:
272    - Invalid **source** descriptions may fail to update.
273    - The updated description might be invalid (even if the **source** was valid).
274    """
275
276    if isinstance(source, ResourceDescrBase):
277        root = source.root
278        source = dump_description(source)
279    else:
280        root = None
281
282    if isinstance(source, dict):
283        descr = build_description(
284            source,
285            context=get_validation_context().replace(
286                root=root, perform_io_checks=perform_io_checks
287            ),
288            format_version=LATEST,
289        )
290
291    else:
292        descr = load_description(
293            source,
294            perform_io_checks=perform_io_checks,
295            format_version=LATEST,
296        )
297
298    if output is not None:
299        save_bioimageio_yaml_only(descr, file=output, exclude_defaults=exclude_defaults)
300
301    return descr

Update a resource description.

Notes:

  • Invalid source descriptions may fail to update.
  • The updated description might be invalid (even if the source was valid).
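
For example (placeholder paths), updating a description to the latest format version and writing the result:

    from pathlib import Path
    from bioimageio.spec import InvalidDescr, update_format

    updated = update_format("old_model/rdf.yaml", output=Path("updated_rdf.yaml"))
    if isinstance(updated, InvalidDescr):
        updated.validation_summary.display()  # inspect why the updated description is invalid
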
def update_hashes( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], zipfile.ZipFile, Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')], Dict[str, YamlValue]], /) -> Union[Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')], InvalidDescr]:
304def update_hashes(
305    source: Union[PermissiveFileSource, ZipFile, ResourceDescr, BioimageioYamlContent],
306    /,
307) -> Union[ResourceDescr, InvalidDescr]:
308    """Update hash values of the files referenced in **source**."""
309    if isinstance(source, ResourceDescrBase):
310        root = source.root
311        source = dump_description(source)
312    else:
313        root = None
314
315    context = get_validation_context().replace(
316        update_hashes=True, root=root, perform_io_checks=True
317    )
318    with context:
319        if isinstance(source, dict):
320            return build_description(source)
321        else:
322            return load_description(source, perform_io_checks=True)

Update hash values of the files referenced in source.
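
A sketch of typical use (placeholder paths). Note that the referenced files must be reachable, as update_hashes enforces perform_io_checks=True:

    from pathlib import Path
    from bioimageio.spec import InvalidDescr, save_bioimageio_yaml_only, update_hashes

    descr = update_hashes("my_model/rdf.yaml")                    # placeholder source
    if not isinstance(descr, InvalidDescr):
        save_bioimageio_yaml_only(descr, file=Path("rdf.yaml"))   # persist the updated hashes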

def validate_format( data: Dict[str, YamlValue], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', context: Optional[ValidationContext] = None) -> ValidationSummary:
204def validate_format(
205    data: BioimageioYamlContent,
206    /,
207    *,
208    format_version: Union[Literal["discover", "latest"], str] = DISCOVER,
209    context: Optional[ValidationContext] = None,
210) -> ValidationSummary:
211    """Validate a dictionary holding a bioimageio description.
212    See `bioimageio.spec.load_description_and_validate_format_only`
213    to validate a file source.
214
215    Args:
216        data: Dictionary holding the raw bioimageio.yaml content.
217        format_version: Format version to (update to and) use for validation.
218        context: Validation context, see `bioimageio.spec.ValidationContext`
219
220    Note:
221        Use `bioimageio.spec.load_description_and_validate_format_only` to validate a
222        file source instead of loading the YAML content and creating the appropriate
223        `ValidationContext`.
224
225        Alternatively, you can use `bioimageio.spec.load_description` and access the
226        `validation_summary` attribute of the returned object.
227    """
228    with context or get_validation_context():
229        rd = build_description(data, format_version=format_version)
230
231    assert rd.validation_summary is not None
232    return rd.validation_summary

Validate a dictionary holding a bioimageio description. See bioimageio.spec.load_description_and_validate_format_only to validate a file source.

Arguments:
  • data: Dictionary holding the raw bioimageio.yaml content.
  • format_version: Format version to (update to and) use for validation.
  • context: Validation context, see bioimageio.spec.ValidationContext
Note:

Use bioimageio.spec.load_description_and_validate_format_only to validate a file source instead of loading the YAML content and creating the appropriate ValidationContext.

Alternatively, you can use bioimageio.spec.load_description and access the validation_summary attribute of the returned object.
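
A minimal sketch, assuming the raw content is read with PyYAML from a hypothetical rdf.yaml:

    import yaml  # PyYAML
    from bioimageio.spec import ValidationContext, validate_format

    with open("rdf.yaml", encoding="utf-8") as f:
        data = yaml.safe_load(f)

    summary = validate_format(data, context=ValidationContext(perform_io_checks=False))
    print(summary.status_icon, summary.status)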

@dataclass(frozen=True)
class ValidationContext(bioimageio.spec._internal.validation_context.ValidationContextBase):
 49@dataclass(frozen=True)
 50class ValidationContext(ValidationContextBase):
 51    """A validation context used to control validation of bioimageio resources.
 52
 53    For example, a relative file path in a bioimageio description requires the **root**
 54    context to evaluate if the file is available and, if **perform_io_checks** is true,
 55    if it matches its expected SHA256 hash value.
 56    """
 57
 58    _context_tokens: "List[Token[Optional[ValidationContext]]]" = field(
 59        init=False, default_factory=list
 60    )
 61
 62    root: Union[RootHttpUrl, DirectoryPath, ZipFile] = Path()
 63    """Url/directory/archive serving as base to resolve any relative file paths."""
 64
 65    warning_level: WarningLevel = 50
 66    """Treat warnings of severity `s` as validation errors if `s >= warning_level`."""
 67
 68    log_warnings: bool = settings.log_warnings
 69    """If `True`, warnings are logged to the terminal
 70
 71    Note: This setting does not affect warning entries
 72        of a generated `bioimageio.spec.ValidationSummary`.
 73    """
 74
 75    raise_errors: bool = False
 76    """Directly raise any validation errors
 77    instead of aggregating errors and returning a `bioimageio.spec.InvalidDescr`. (for debugging)"""
 78
 79    @property
 80    def summary(self):
 81        if isinstance(self.root, ZipFile):
 82            if self.root.filename is None:
 83                root = "in-memory"
 84            else:
 85                root = Path(self.root.filename)
 86        else:
 87            root = self.root
 88
 89        return ValidationContextSummary(
 90            root=root,
 91            file_name=self.file_name,
 92            perform_io_checks=self.perform_io_checks,
 93            known_files=dict(self.known_files),
 94            update_hashes=self.update_hashes,
 95        )
 96
 97    def __enter__(self):
 98        self._context_tokens.append(_validation_context_var.set(self))
 99        return self
100
101    def __exit__(self, type, value, traceback):  # type: ignore
102        _validation_context_var.reset(self._context_tokens.pop(-1))
103
104    def replace(  # TODO: probably use __replace__ when py>=3.13
105        self,
106        root: Optional[Union[RootHttpUrl, DirectoryPath, ZipFile]] = None,
107        warning_level: Optional[WarningLevel] = None,
108        log_warnings: Optional[bool] = None,
109        file_name: Optional[str] = None,
110        perform_io_checks: Optional[bool] = None,
111        known_files: Optional[Dict[str, Sha256]] = None,
112        raise_errors: Optional[bool] = None,
113        update_hashes: Optional[bool] = None,
114    ) -> Self:
115        if known_files is None and root is not None and self.root != root:
116            # reset known files if root changes, but no new known_files are given
117            known_files = {}
118
119        return self.__class__(
120            root=self.root if root is None else root,
121            warning_level=(
122                self.warning_level if warning_level is None else warning_level
123            ),
124            log_warnings=self.log_warnings if log_warnings is None else log_warnings,
125            file_name=self.file_name if file_name is None else file_name,
126            perform_io_checks=(
127                self.perform_io_checks
128                if perform_io_checks is None
129                else perform_io_checks
130            ),
131            known_files=self.known_files if known_files is None else known_files,
132            raise_errors=self.raise_errors if raise_errors is None else raise_errors,
133            update_hashes=(
134                self.update_hashes if update_hashes is None else update_hashes
135            ),
136        )
137
138    @property
139    def source_name(self) -> str:
140        if self.file_name is None:
141            return "in-memory"
142        else:
143            try:
144                if isinstance(self.root, Path):
145                    source = (self.root / self.file_name).absolute()
146                else:
147                    parsed = urlsplit(str(self.root))
148                    path = list(parsed.path.strip("/").split("/")) + [self.file_name]
149                    source = urlunsplit(
150                        (
151                            parsed.scheme,
152                            parsed.netloc,
153                            "/".join(path),
154                            parsed.query,
155                            parsed.fragment,
156                        )
157                    )
158            except ValueError:
159                return self.file_name
160            else:
161                return str(source)

A validation context used to control validation of bioimageio resources.

For example, a relative file path in a bioimageio description requires the root context to evaluate if the file is available and, if perform_io_checks is true, if it matches its expected SHA256 hash value.

ValidationContext( file_name: Optional[str] = None, perform_io_checks: bool = True, known_files: Dict[str, bioimageio.spec._internal.io_basics.Sha256] = <factory>, update_hashes: bool = False, root: Union[bioimageio.spec._internal.root_url.RootHttpUrl, Annotated[pathlib.Path, PathType(path_type='dir')], zipfile.ZipFile] = PosixPath('.'), warning_level: Literal[20, 30, 35, 50] = 50, log_warnings: bool = True, raise_errors: bool = False)
root: Union[bioimageio.spec._internal.root_url.RootHttpUrl, Annotated[pathlib.Path, PathType(path_type='dir')], zipfile.ZipFile] = PosixPath('.')

Url/directory/archive serving as base to resolve any relative file paths.

warning_level: Literal[20, 30, 35, 50] = 50

Treat warnings of severity s as validation errors if s >= warning_level.

log_warnings: bool = True

If True, warnings are logged to the terminal

Note: This setting does not affect warning entries of a generated bioimageio.spec.ValidationSummary.

raise_errors: bool = False

Directly raise any validation errors instead of aggregating errors and returning a bioimageio.spec.InvalidDescr. (for debugging)
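
A usage sketch: a context can be entered as a context manager, and replace derives a modified copy. The raw content dict below is illustrative and deliberately incomplete.

    from pathlib import Path
    from bioimageio.spec import ValidationContext, validate_format

    data = {"type": "dataset", "format_version": "0.3.0"}   # incomplete raw bioimageio.yaml content
    ctx = ValidationContext(root=Path("."), perform_io_checks=False)
    with ctx.replace(warning_level=30):   # treat warnings of severity >= 30 as errors
        summary = validate_format(data)   # picks up the entered context via get_validation_context()
    print(summary.status)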

summary
79    @property
80    def summary(self):
81        if isinstance(self.root, ZipFile):
82            if self.root.filename is None:
83                root = "in-memory"
84            else:
85                root = Path(self.root.filename)
86        else:
87            root = self.root
88
89        return ValidationContextSummary(
90            root=root,
91            file_name=self.file_name,
92            perform_io_checks=self.perform_io_checks,
93            known_files=dict(self.known_files),
94            update_hashes=self.update_hashes,
95        )
def replace( self, root: Union[bioimageio.spec._internal.root_url.RootHttpUrl, Annotated[pathlib.Path, PathType(path_type='dir')], zipfile.ZipFile, NoneType] = None, warning_level: Optional[Literal[20, 30, 35, 50]] = None, log_warnings: Optional[bool] = None, file_name: Optional[str] = None, perform_io_checks: Optional[bool] = None, known_files: Optional[Dict[str, bioimageio.spec._internal.io_basics.Sha256]] = None, raise_errors: Optional[bool] = None, update_hashes: Optional[bool] = None) -> Self:
104    def replace(  # TODO: probably use __replace__ when py>=3.13
105        self,
106        root: Optional[Union[RootHttpUrl, DirectoryPath, ZipFile]] = None,
107        warning_level: Optional[WarningLevel] = None,
108        log_warnings: Optional[bool] = None,
109        file_name: Optional[str] = None,
110        perform_io_checks: Optional[bool] = None,
111        known_files: Optional[Dict[str, Sha256]] = None,
112        raise_errors: Optional[bool] = None,
113        update_hashes: Optional[bool] = None,
114    ) -> Self:
115        if known_files is None and root is not None and self.root != root:
116            # reset known files if root changes, but no new known_files are given
117            known_files = {}
118
119        return self.__class__(
120            root=self.root if root is None else root,
121            warning_level=(
122                self.warning_level if warning_level is None else warning_level
123            ),
124            log_warnings=self.log_warnings if log_warnings is None else log_warnings,
125            file_name=self.file_name if file_name is None else file_name,
126            perform_io_checks=(
127                self.perform_io_checks
128                if perform_io_checks is None
129                else perform_io_checks
130            ),
131            known_files=self.known_files if known_files is None else known_files,
132            raise_errors=self.raise_errors if raise_errors is None else raise_errors,
133            update_hashes=(
134                self.update_hashes if update_hashes is None else update_hashes
135            ),
136        )
source_name: str
138    @property
139    def source_name(self) -> str:
140        if self.file_name is None:
141            return "in-memory"
142        else:
143            try:
144                if isinstance(self.root, Path):
145                    source = (self.root / self.file_name).absolute()
146                else:
147                    parsed = urlsplit(str(self.root))
148                    path = list(parsed.path.strip("/").split("/")) + [self.file_name]
149                    source = urlunsplit(
150                        (
151                            parsed.scheme,
152                            parsed.netloc,
153                            "/".join(path),
154                            parsed.query,
155                            parsed.fragment,
156                        )
157                    )
158            except ValueError:
159                return self.file_name
160            else:
161                return str(source)
class ValidationSummary(pydantic.main.BaseModel):
205class ValidationSummary(BaseModel, extra="allow"):
206    """Summarizes output of all bioimageio validations and tests
207    for one specific `ResourceDescr` instance."""
208
209    name: str
210    """name of the validation"""
211    source_name: str
212    """source of the validated bioimageio description"""
213    id: Optional[str] = None
214    """ID of the resource being validated"""
215    type: str
216    """type of the resource being validated"""
217    format_version: str
218    """format version of the resource being validated"""
219    status: Literal["passed", "valid-format", "failed"]
220    """overall status of the bioimageio validation"""
221    details: NotEmpty[List[ValidationDetail]]
222    """list of validation details"""
223    env: Set[InstalledPackage] = Field(
224        default_factory=lambda: {
225            InstalledPackage(name="bioimageio.spec", version=VERSION)
226        }
227    )
228    """list of selected, relevant package versions"""
229
230    conda_list: Optional[Sequence[InstalledPackage]] = None
231    """parsed output of conda list"""
232
233    @property
234    def status_icon(self):
235        if self.status == "passed":
236            return "✔️"
237        elif self.status == "valid-format":
238            return "🟡"
239        else:
240            return "❌"
241
242    @property
243    def errors(self) -> List[ErrorEntry]:
244        return list(chain.from_iterable(d.errors for d in self.details))
245
246    @property
247    def warnings(self) -> List[WarningEntry]:
248        return list(chain.from_iterable(d.warnings for d in self.details))
249
250    def format(
251        self,
252        hide_tracebacks: bool = False,
253        hide_source: bool = False,
254        hide_env: bool = False,
255    ):
256        """Format summary as Markdown string"""
257        return self._format(
258            hide_tracebacks=hide_tracebacks,
259            hide_source=hide_source,
260            hide_env=hide_env,
261            target="md",
262        )
263
264    format_md = format
265
266    def format_html(self):
267        md_with_html = self._format(target="html")
268        return markdown.markdown(
269            md_with_html, extensions=["tables", "fenced_code", "nl2br"]
270        )
271
272    # TODO: fix bug which causes extensive white space between the info table and details table
273    # (the generated markdown seems fine)
274    @no_type_check
275    def display(self) -> None:
276        try:  # render as HTML in Jupyter notebook
277            from IPython.core.getipython import get_ipython
278            from IPython.display import display_html
279        except ImportError:
280            pass
281        else:
282            if get_ipython() is not None:
283                _ = display_html(self.format_html(), raw=True)
284                return
285
286        # render with rich
287        self._format(target=rich.console.Console())
288
289    def add_detail(self, detail: ValidationDetail):
290        if detail.status == "failed":
291            self.status = "failed"
292        elif detail.status != "passed":
293            assert_never(detail.status)
294
295        self.details.append(detail)
296
297    def log(
298        self,
299        to: Union[Literal["display"], Path, Sequence[Union[Literal["display"], Path]]],
300    ) -> List[Path]:
301        """Convenience method to display the validation summary in the terminal and/or
302        save it to disk. See `save` for details."""
303        if to == "display":
304            display = True
305            save_to = []
306        elif isinstance(to, Path):
307            display = False
308            save_to = [to]
309        else:
310            display = "display" in to
311            save_to = [p for p in to if p != "display"]
312
313        if display:
314            self.display()
315
316        return self.save(save_to)
317
318    def save(
319        self, path: Union[Path, Sequence[Path]] = Path("{id}_summary_{now}")
320    ) -> List[Path]:
321        """Save the validation/test summary in JSON, Markdown or HTML format.
322
323        Returns:
324            List of file paths the summary was saved to.
325
326        Notes:
327        - Format is chosen based on the suffix: `.json`, `.md`, `.html`.
328        - If **path** has no suffix, it is assumed to be a directory to which a
329          `summary.json`, `summary.md` and `summary.html` are saved.
330        """
331        if isinstance(path, (str, Path)):
332            path = [Path(path)]
333
334        # folder to file paths
335        file_paths: List[Path] = []
336        for p in path:
337            if p.suffix:
338                file_paths.append(p)
339            else:
340                file_paths.extend(
341                    [
342                        p / "summary.json",
343                        p / "summary.md",
344                        p / "summary.html",
345                    ]
346                )
347
348        now = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
349        for p in file_paths:
350            p = Path(str(p).format(id=self.id or "bioimageio", now=now))
351            if p.suffix == ".json":
352                self.save_json(p)
353            elif p.suffix == ".md":
354                self.save_markdown(p)
355            elif p.suffix == ".html":
356                self.save_html(p)
357            else:
358                raise ValueError(f"Unknown summary path suffix '{p.suffix}'")
359
360        return file_paths
361
362    def save_json(
363        self, path: Path = Path("summary.json"), *, indent: Optional[int] = 2
364    ):
365        """Save validation/test summary as JSON file."""
366        json_str = self.model_dump_json(indent=indent)
367        path.parent.mkdir(exist_ok=True)
368        _ = path.write_text(json_str, encoding="utf-8")
369        logger.info("Saved summary to {}", path.absolute())
370
371    def save_markdown(self, path: Path = Path("summary.md")):
372        """Save rendered validation/test summary as Markdown file."""
373        formatted = self.format_md()
374        path.parent.mkdir(exist_ok=True)
375        _ = path.write_text(formatted, encoding="utf-8")
376        logger.info("Saved Markdown formatted summary to {}", path.absolute())
377
378    def save_html(self, path: Path = Path("summary.html")) -> None:
379        """Save rendered validation/test summary as HTML file."""
380        path.parent.mkdir(exist_ok=True)
381
382        html = self.format_html()
383        _ = path.write_text(html, encoding="utf-8")
384        logger.info("Saved HTML formatted summary to {}", path.absolute())
385
386    def load_json(self, path: Path) -> Self:
387        """Load validation/test summary from a suitable JSON file"""
388        json_str = path.read_text(encoding="utf-8")
389        return self.model_validate_json(json_str)
390
391    @field_validator("env", mode="before")
392    def _convert_dict(cls, value: List[Union[List[str], Dict[str, str]]]):
393        """convert old env value for backwards compatibility"""
394        if isinstance(value, list):
395            return [
396                (
397                    (v["name"], v["version"], v.get("build", ""), v.get("channel", ""))
398                    if isinstance(v, dict) and "name" in v and "version" in v
399                    else v
400                )
401                for v in value
402            ]
403        else:
404            return value
405
406    def _format(
407        self,
408        *,
409        hide_tracebacks: bool = False,
410        hide_source: bool = False,
411        hide_env: bool = False,
412        target: Union[rich.console.Console, Literal["html", "md"]],
413    ):
414        return _format_summary(
415            self,
416            hide_tracebacks=hide_tracebacks,
417            hide_source=hide_source,
418            hide_env=hide_env,
419            target=target,
420        )

Summarizes output of all bioimageio validations and tests for one specific ResourceDescr instance.

name: str

name of the validation

source_name: str

source of the validated bioimageio description

id: Optional[str]

ID of the resource being validated

type: str

type of the resource being validated

format_version: str

format version of the resource being validated

status: Literal['passed', 'valid-format', 'failed']

overall status of the bioimageio validation

details: Annotated[List[bioimageio.spec.summary.ValidationDetail], MinLen(min_length=1)]

list of validation details

env: Set[bioimageio.spec.summary.InstalledPackage]

list of selected, relevant package versions

conda_list: Optional[Sequence[bioimageio.spec.summary.InstalledPackage]]

parsed output of conda list
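
For orientation, a typical way to obtain and inspect a summary (placeholder source):

    from bioimageio.spec import load_description

    descr = load_description("my_model.bioimageio.zip")   # placeholder source
    summary = descr.validation_summary
    summary.display()         # rendered via IPython in notebooks, otherwise via rich
    print(summary.format())   # Markdown string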

status_icon
233    @property
234    def status_icon(self):
235        if self.status == "passed":
236            return "✔️"
237        elif self.status == "valid-format":
238            return "🟡"
239        else:
240            return "❌"
errors: List[bioimageio.spec.summary.ErrorEntry]
242    @property
243    def errors(self) -> List[ErrorEntry]:
244        return list(chain.from_iterable(d.errors for d in self.details))
warnings: List[bioimageio.spec.summary.WarningEntry]
246    @property
247    def warnings(self) -> List[WarningEntry]:
248        return list(chain.from_iterable(d.warnings for d in self.details))
def format( self, hide_tracebacks: bool = False, hide_source: bool = False, hide_env: bool = False):
250    def format(
251        self,
252        hide_tracebacks: bool = False,
253        hide_source: bool = False,
254        hide_env: bool = False,
255    ):
256        """Format summary as Markdown string"""
257        return self._format(
258            hide_tracebacks=hide_tracebacks,
259            hide_source=hide_source,
260            hide_env=hide_env,
261            target="md",
262        )

Format summary as Markdown string

def format_md( self, hide_tracebacks: bool = False, hide_source: bool = False, hide_env: bool = False):
250    def format(
251        self,
252        hide_tracebacks: bool = False,
253        hide_source: bool = False,
254        hide_env: bool = False,
255    ):
256        """Format summary as Markdown string"""
257        return self._format(
258            hide_tracebacks=hide_tracebacks,
259            hide_source=hide_source,
260            hide_env=hide_env,
261            target="md",
262        )

Format summary as Markdown string

def format_html(self):
266    def format_html(self):
267        md_with_html = self._format(target="html")
268        return markdown.markdown(
269            md_with_html, extensions=["tables", "fenced_code", "nl2br"]
270        )
@no_type_check
def display(self) -> None:
274    @no_type_check
275    def display(self) -> None:
276        try:  # render as HTML in Jupyter notebook
277            from IPython.core.getipython import get_ipython
278            from IPython.display import display_html
279        except ImportError:
280            pass
281        else:
282            if get_ipython() is not None:
283                _ = display_html(self.format_html(), raw=True)
284                return
285
286        # render with rich
287        self._format(target=rich.console.Console())
def add_detail(self, detail: bioimageio.spec.summary.ValidationDetail):
289    def add_detail(self, detail: ValidationDetail):
290        if detail.status == "failed":
291            self.status = "failed"
292        elif detail.status != "passed":
293            assert_never(detail.status)
294
295        self.details.append(detail)
def log( self, to: Union[Literal['display'], pathlib.Path, Sequence[Union[Literal['display'], pathlib.Path]]]) -> List[pathlib.Path]:
297    def log(
298        self,
299        to: Union[Literal["display"], Path, Sequence[Union[Literal["display"], Path]]],
300    ) -> List[Path]:
301        """Convenience method to display the validation summary in the terminal and/or
302        save it to disk. See `save` for details."""
303        if to == "display":
304            display = True
305            save_to = []
306        elif isinstance(to, Path):
307            display = False
308            save_to = [to]
309        else:
310            display = "display" in to
311            save_to = [p for p in to if p != "display"]
312
313        if display:
314            self.display()
315
316        return self.save(save_to)

Convenience method to display the validation summary in the terminal and/or save it to disk. See save for details.
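
For example (placeholder source and output directory):

    from pathlib import Path
    from bioimageio.spec import load_description

    summary = load_description("my_model.bioimageio.zip").validation_summary
    saved = summary.log(["display", Path("reports")])   # display it and save summary.{json,md,html} under reports/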

def save( self, path: Union[pathlib.Path, Sequence[pathlib.Path]] = PosixPath('{id}_summary_{now}')) -> List[pathlib.Path]:
318    def save(
319        self, path: Union[Path, Sequence[Path]] = Path("{id}_summary_{now}")
320    ) -> List[Path]:
321        """Save the validation/test summary in JSON, Markdown or HTML format.
322
323        Returns:
324            List of file paths the summary was saved to.
325
326        Notes:
327        - Format is chosen based on the suffix: `.json`, `.md`, `.html`.
328        - If **path** has no suffix, it is assumed to be a directory to which a
329          `summary.json`, `summary.md` and `summary.html` are saved.
330        """
331        if isinstance(path, (str, Path)):
332            path = [Path(path)]
333
334        # folder to file paths
335        file_paths: List[Path] = []
336        for p in path:
337            if p.suffix:
338                file_paths.append(p)
339            else:
340                file_paths.extend(
341                    [
342                        p / "summary.json",
343                        p / "summary.md",
344                        p / "summary.html",
345                    ]
346                )
347
348        now = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
349        for p in file_paths:
350            p = Path(str(p).format(id=self.id or "bioimageio", now=now))
351            if p.suffix == ".json":
352                self.save_json(p)
353            elif p.suffix == ".md":
354                self.save_markdown(p)
355            elif p.suffix == ".html":
356                self.save_html(p)
357            else:
358                raise ValueError(f"Unknown summary path suffix '{p.suffix}'")
359
360        return file_paths

Save the validation/test summary in JSON, Markdown or HTML format.

Returns:

List of file paths the summary was saved to.

Notes:

  • Format is chosen based on the suffix: .json, .md, .html.
  • If path has no suffix, it is assumed to be a directory to which a summary.json, summary.md and summary.html are saved.
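
Reusing the summary instance from the sketch above; {id} and {now} in a given (or the default) path are substituted when the files are written:

    from pathlib import Path

    # written e.g. as reports/<resource-id>_summary_20250101T000000Z.md
    _ = summary.save(Path("reports/{id}_summary_{now}.md"))
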
def save_json( self, path: pathlib.Path = PosixPath('summary.json'), *, indent: Optional[int] = 2):
362    def save_json(
363        self, path: Path = Path("summary.json"), *, indent: Optional[int] = 2
364    ):
365        """Save validation/test summary as JSON file."""
366        json_str = self.model_dump_json(indent=indent)
367        path.parent.mkdir(exist_ok=True)
368        _ = path.write_text(json_str, encoding="utf-8")
369        logger.info("Saved summary to {}", path.absolute())

Save validation/test summary as JSON file.

def save_markdown(self, path: pathlib.Path = PosixPath('summary.md')):
371    def save_markdown(self, path: Path = Path("summary.md")):
372        """Save rendered validation/test summary as Markdown file."""
373        formatted = self.format_md()
374        path.parent.mkdir(exist_ok=True)
375        _ = path.write_text(formatted, encoding="utf-8")
376        logger.info("Saved Markdown formatted summary to {}", path.absolute())

Save rendered validation/test summary as Markdown file.

def save_html(self, path: pathlib.Path = PosixPath('summary.html')) -> None:
378    def save_html(self, path: Path = Path("summary.html")) -> None:
379        """Save rendered validation/test summary as HTML file."""
380        path.parent.mkdir(exist_ok=True)
381
382        html = self.format_html()
383        _ = path.write_text(html, encoding="utf-8")
384        logger.info("Saved HTML formatted summary to {}", path.absolute())

Save rendered validation/test summary as HTML file.

def load_json(self, path: pathlib.Path) -> Self:
386    def load_json(self, path: Path) -> Self:
387        """Load validation/test summary from a suitable JSON file"""
388        json_str = path.read_text(encoding="utf-8")
389        return self.model_validate_json(json_str)

Load validation/test summary from a suitable JSON file