bioimageio.spec

  1"""
  2.. include:: ../../README.md
  3"""
  4
  5# ruff: noqa: E402
  6from loguru import logger
  7
  8logger.disable("bioimageio.spec")
  9
 10from . import (
 11    application,
 12    common,
 13    conda_env,
 14    dataset,
 15    generic,
 16    model,
 17    pretty_validation_errors,
 18    summary,
 19    utils,
 20)
 21from ._description import (
 22    LatestResourceDescr,
 23    ResourceDescr,
 24    SpecificResourceDescr,
 25    build_description,
 26    dump_description,
 27    validate_format,
 28)
 29from ._get_conda_env import BioimageioCondaEnv, get_conda_env
 30from ._internal import settings
 31from ._internal.common_nodes import InvalidDescr
 32from ._internal.validation_context import ValidationContext, get_validation_context
 33from ._io import (
 34    load_dataset_description,
 35    load_description,
 36    load_description_and_validate_format_only,
 37    load_model_description,
 38    save_bioimageio_yaml_only,
 39    update_format,
 40    update_hashes,
 41)
 42from ._package import (
 43    get_resource_package_content,
 44    save_bioimageio_package,
 45    save_bioimageio_package_as_folder,
 46    save_bioimageio_package_to_stream,
 47)
 48from ._upload import upload
 49from ._version import VERSION as __version__
 50from .application import AnyApplicationDescr, ApplicationDescr
 51from .dataset import AnyDatasetDescr, DatasetDescr
 52from .generic import AnyGenericDescr, GenericDescr
 53from .model import AnyModelDescr, ModelDescr
 54from .notebook import AnyNotebookDescr, NotebookDescr
 55from .pretty_validation_errors import enable_pretty_validation_errors_in_ipynb
 56from .summary import ValidationSummary
 57
 58__all__ = [
 59    "__version__",
 60    "AnyApplicationDescr",
 61    "AnyDatasetDescr",
 62    "AnyGenericDescr",
 63    "AnyModelDescr",
 64    "AnyNotebookDescr",
 65    "application",
 66    "ApplicationDescr",
 67    "BioimageioCondaEnv",
 68    "build_description",
 69    "common",
 70    "conda_env",
 71    "dataset",
 72    "DatasetDescr",
 73    "dump_description",
 74    "enable_pretty_validation_errors_in_ipynb",
 75    "generic",
 76    "GenericDescr",
 77    "get_conda_env",
 78    "get_resource_package_content",
 79    "get_validation_context",
 80    "InvalidDescr",
 81    "LatestResourceDescr",
 82    "load_dataset_description",
 83    "load_description_and_validate_format_only",
 84    "load_description",
 85    "load_model_description",
 86    "model",
 87    "ModelDescr",
 88    "NotebookDescr",
 89    "pretty_validation_errors",
 90    "ResourceDescr",
 91    "save_bioimageio_package_as_folder",
 92    "save_bioimageio_package_to_stream",
 93    "save_bioimageio_package",
 94    "save_bioimageio_yaml_only",
 95    "settings",
 96    "SpecificResourceDescr",
 97    "summary",
 98    "update_format",
 99    "update_hashes",
100    "upload",
101    "utils",
102    "validate_format",
103    "ValidationContext",
104    "ValidationSummary",
105]
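
The module disables its loguru logger on import (see logger.disable("bioimageio.spec") above). A minimal sketch of re-enabling it to see log output:

from loguru import logger

# re-enable the logger that bioimageio.spec disables at import time
logger.enable("bioimageio.spec")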
__version__ = '0.5.6.0'
AnyApplicationDescr = Annotated[Union[application.v0_2.ApplicationDescr, ApplicationDescr], Discriminator('format_version')]  # application 0.2 | 0.3
AnyDatasetDescr = Annotated[Union[dataset.v0_2.DatasetDescr, DatasetDescr], Discriminator('format_version')]  # dataset 0.2 | 0.3
AnyGenericDescr = Annotated[Union[generic.v0_2.GenericDescr, GenericDescr], Discriminator('format_version')]  # generic 0.2 | 0.3
AnyModelDescr = Annotated[Union[model.v0_4.ModelDescr, ModelDescr], Discriminator('format_version')]  # model 0.4 | 0.5
AnyNotebookDescr = Annotated[Union[notebook.v0_2.NotebookDescr, NotebookDescr], Discriminator('format_version')]  # notebook 0.2 | 0.3
class ApplicationDescr(bioimageio.spec.generic.v0_3.GenericDescrBase):
33class ApplicationDescr(GenericDescrBase):
34    """Bioimage.io description of an application."""
35
36    implemented_type: ClassVar[Literal["application"]] = "application"
37    if TYPE_CHECKING:
38        type: Literal["application"] = "application"
39    else:
40        type: Literal["application"]
41
42    id: Optional[ApplicationId] = None
43    """bioimage.io-wide unique resource identifier
44    assigned by bioimage.io; version **un**specific."""
45
46    parent: Optional[ApplicationId] = None
47    """The description from which this one is derived"""
48
49    source: Annotated[
50        FAIR[Optional[FileSource_]],
51        Field(description="URL or path to the source of the application"),
52    ] = None
53    """The primary source of the application"""

Bioimage.io description of an application.

implemented_type: ClassVar[Literal['application']] = 'application'

bioimage.io-wide unique resource identifier assigned by bioimage.io; version unspecific.

The description from which this one is derived

source: Annotated[FAIR[Optional[FileSource_]], Field(description='URL or path to the source of the application')] = None

URL or path to the source of the application

implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = (0, 3, 0)
class BioimageioCondaEnv(bioimageio.spec.conda_env.CondaEnv):
 80class BioimageioCondaEnv(CondaEnv):
 81    """A special `CondaEnv` that
 82    - automatically adds bioimageio specific dependencies
 83    - sorts dependencies
 84    """
 85
 86    @model_validator(mode="after")
 87    def _normalize_bioimageio_conda_env(self):
 88        """update a conda env such that we have bioimageio.core and sorted dependencies"""
 89        for req_channel in ("conda-forge", "nodefaults"):
 90            if req_channel not in self.channels:
 91                self.channels.append(req_channel)
 92
 93        if "defaults" in self.channels:
 94            warnings.warn("removing 'defaults' from conda-channels")
 95            self.channels.remove("defaults")
 96
 97        if "pip" not in self.dependencies:
 98            self.dependencies.append("pip")
 99
100        for dep in self.dependencies:
101            if isinstance(dep, PipDeps):
102                pip_section = dep
103                pip_section.pip.sort()
104                break
105        else:
106            pip_section = None
107
108        if (
109            pip_section is None
110            or not any(pd.startswith("bioimageio.core") for pd in pip_section.pip)
111        ) and not any(
112            d.startswith("bioimageio.core")
113            or d.startswith("conda-forge::bioimageio.core")
114            for d in self.dependencies
115            if not isinstance(d, PipDeps)
116        ):
117            self.dependencies.append("conda-forge::bioimageio.core")
118
119        self.dependencies.sort()
120        return self

A special CondaEnv that

  • automatically adds bioimageio specific dependencies
  • sorts dependencies
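
A minimal usage sketch; the constructor fields are an assumption based on the validator above, which reads and rewrites channels and dependencies:

from bioimageio.spec import BioimageioCondaEnv

# assumed constructor fields, mirroring the attributes used in _normalize_bioimageio_conda_env
env = BioimageioCondaEnv(name="my-env", channels=["defaults"], dependencies=["numpy", "pip"])
# after validation: 'defaults' is dropped, 'conda-forge' and 'nodefaults' are added,
# 'conda-forge::bioimageio.core' is appended, and dependencies are sorted
print(env.channels, env.dependencies)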
def build_description(content: Mapping[str, YamlValueView], /, *, context: Optional[ValidationContext] = None, format_version: Union[Literal['latest', 'discover'], str] = 'discover') -> Union[ResourceDescr, InvalidDescr]:
175def build_description(
176    content: BioimageioYamlContentView,
177    /,
178    *,
179    context: Optional[ValidationContext] = None,
180    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
181) -> Union[ResourceDescr, InvalidDescr]:
182    """build a bioimage.io resource description from an RDF's content.
183
184    Use `load_description` if you want to build a resource description from an rdf.yaml
185    or bioimage.io zip-package.
186
187    Args:
188        content: loaded rdf.yaml file (loaded with YAML, not bioimageio.spec)
189        context: validation context to use during validation
190        format_version:
191            (optional) use this argument to load the resource and
192            convert its metadata to a higher format_version.
193            Note:
194            - Use "latest" to convert to the latest available format version.
195            - Use "discover" to use the format version specified in the RDF.
196            - Only considers major.minor format version, ignores patch version.
197            - Conversion to lower format versions is not supported.
198
199    Returns:
200        An object holding all metadata of the bioimage.io resource
201
202    """
203
204    return build_description_impl(
205        content,
206        context=context,
207        format_version=format_version,
208        get_rd_class=_get_rd_class,
209    )

build a bioimage.io resource description from an RDF's content.

Use load_description if you want to build a resource description from an rdf.yaml or bioimage.io zip-package.

Arguments:
  • content: loaded rdf.yaml file (loaded with YAML, not bioimageio.spec)
  • context: validation context to use during validation
  • format_version: (optional) use this argument to load the resource and convert its metadata to a higher format_version. Note:
    • Use "latest" to convert to the latest available format version.
    • Use "discover" to use the format version specified in the RDF.
    • Only considers major.minor format version, ignores patch version.
    • Conversion to lower format versions is not supported.
Returns:

An object holding all metadata of the bioimage.io resource
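
A minimal sketch of calling build_description on YAML content loaded with a YAML parser (the file path is hypothetical):

import yaml  # assumes PyYAML is available

from bioimageio.spec import InvalidDescr, build_description

with open("rdf.yaml") as f:
    content = yaml.safe_load(f)  # plain YAML load, as the docstring requires

rd = build_description(content, format_version="latest")
if isinstance(rd, InvalidDescr):
    print(rd.validation_summary)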

class DatasetDescr(bioimageio.spec.generic.v0_3.GenericDescrBase):
 40class DatasetDescr(GenericDescrBase):
 41    """A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage
 42    processing.
 43    """
 44
 45    implemented_type: ClassVar[Literal["dataset"]] = "dataset"
 46    if TYPE_CHECKING:
 47        type: Literal["dataset"] = "dataset"
 48    else:
 49        type: Literal["dataset"]
 50
 51    id: Optional[DatasetId] = None
 52    """bioimage.io-wide unique resource identifier
 53    assigned by bioimage.io; version **un**specific."""
 54
 55    parent: Optional[DatasetId] = None
 56    """The description from which this one is derived"""
 57
 58    source: FAIR[Optional[HttpUrl]] = None
 59    """"URL to the source of the dataset."""
 60
 61    @model_validator(mode="before")
 62    @classmethod
 63    def _convert(cls, data: Dict[str, Any], /) -> Dict[str, Any]:
 64        if (
 65            data.get("type") == "dataset"
 66            and isinstance(fv := data.get("format_version"), str)
 67            and fv.startswith("0.2.")
 68        ):
 69            old = DatasetDescr02.load(data)
 70            if isinstance(old, InvalidDescr):
 71                return data
 72
 73            return cast(
 74                Dict[str, Any],
 75                (cls if TYPE_CHECKING else dict)(
 76                    attachments=(
 77                        []
 78                        if old.attachments is None
 79                        else [FileDescr(source=f) for f in old.attachments.files]
 80                    ),
 81                    authors=[_author_conv.convert_as_dict(a) for a in old.authors],  # pyright: ignore[reportArgumentType]
 82                    badges=old.badges,
 83                    cite=[
 84                        {"text": c.text, "doi": c.doi, "url": c.url} for c in old.cite
 85                    ],  # pyright: ignore[reportArgumentType]
 86                    config=old.config,  # pyright: ignore[reportArgumentType]
 87                    covers=old.covers,
 88                    description=old.description,
 89                    documentation=old.documentation,
 90                    format_version="0.3.0",
 91                    git_repo=old.git_repo,  # pyright: ignore[reportArgumentType]
 92                    icon=old.icon,
 93                    id=None if old.id is None else DatasetId(old.id),
 94                    license=old.license,  # type: ignore
 95                    links=old.links,
 96                    maintainers=[
 97                        _maintainer_conv.convert_as_dict(m) for m in old.maintainers
 98                    ],  # pyright: ignore[reportArgumentType]
 99                    name=old.name,
100                    source=old.source,
101                    tags=old.tags,
102                    type=old.type,
103                    uploader=old.uploader,
104                    version=old.version,
105                    **(old.model_extra or {}),
106                ),
107            )
108
109        return data

A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage processing.

implemented_type: ClassVar[Literal['dataset']] = 'dataset'

bioimage.io-wide unique resource identifier assigned by bioimage.io; version unspecific.

parent: Optional[bioimageio.spec.dataset.v0_3.DatasetId] = None

The description from which this one is derived

source: FAIR[Optional[HttpUrl]] = None

"URL to the source of the dataset.

implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = (0, 3, 0)
def dump_description(rd: Union[ResourceDescr, InvalidDescr], /, *, exclude_unset: bool = True, exclude_defaults: bool = False) -> Dict[str, YamlValue]:
66def dump_description(
67    rd: Union[ResourceDescr, InvalidDescr],
68    /,
69    *,
70    exclude_unset: bool = True,
71    exclude_defaults: bool = False,
72) -> BioimageioYamlContent:
73    """Converts a resource to a dictionary containing only simple types that can directly be serialzed to YAML.
74
75    Args:
76        rd: bioimageio resource description
76        exclude_unset: Exclude fields that have not explicitly been set.
78        exclude_defaults: Exclude fields that have the default value (even if set explicitly).
79    """
80    return rd.model_dump(
81        mode="json", exclude_unset=exclude_unset, exclude_defaults=exclude_defaults
82    )

Converts a resource to a dictionary containing only simple types that can directly be serialized to YAML.

Arguments:
  • rd: bioimageio resource description
  • exclude_unset: Exclude fields that have not explicitly been set.
  • exclude_defaults: Exclude fields that have the default value (even if set explicitly).
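
A round-trip sketch: load a description and dump it back to plain, YAML-serializable types (the source path is hypothetical):

from bioimageio.spec import dump_description, load_description

rd = load_description("rdf.yaml")
content = dump_description(rd, exclude_unset=True)
assert isinstance(content, dict)  # ready to be written with any YAML library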
def enable_pretty_validation_errors_in_ipynb():
92def enable_pretty_validation_errors_in_ipynb():
93    """DEPRECATED; this is enabled by default at import time."""
94    warnings.warn(
95        "deprecated, this is enabled by default at import time.",
96        DeprecationWarning,
97        stacklevel=2,
98    )

DEPRECATED; this is enabled by default at import time.

class GenericDescr(bioimageio.spec.generic.v0_3.GenericDescrBase):
490class GenericDescr(GenericDescrBase, extra="ignore"):
491    """Specification of the fields used in a generic bioimage.io-compliant resource description file (RDF).
492
493    An RDF is a YAML file that describes a resource such as a model, a dataset, or a notebook.
494    Note that those resources are described with a type-specific RDF.
495    Use this generic resource description, if none of the known specific types matches your resource.
496    """
497
498    implemented_type: ClassVar[Literal["generic"]] = "generic"
499    if TYPE_CHECKING:
500        type: Annotated[str, LowerCase] = "generic"
501        """The resource type assigns a broad category to the resource."""
502    else:
503        type: Annotated[str, LowerCase]
504        """The resource type assigns a broad category to the resource."""
505
506    id: Optional[
507        Annotated[ResourceId, Field(examples=["affable-shark", "ambitious-sloth"])]
508    ] = None
509    """bioimage.io-wide unique resource identifier
510    assigned by bioimage.io; version **un**specific."""
511
512    parent: Optional[ResourceId] = None
513    """The description from which this one is derived"""
514
515    source: Optional[HttpUrl] = None
516    """The primary source of the resource"""
517
518    @field_validator("type", mode="after")
519    @classmethod
520    def check_specific_types(cls, value: str) -> str:
521        if value in KNOWN_SPECIFIC_RESOURCE_TYPES:
522            raise ValueError(
523                f"Use the {value} description instead of this generic description for"
524                + f" your '{value}' resource."
525            )
526
527        return value

Specification of the fields used in a generic bioimage.io-compliant resource description file (RDF).

An RDF is a YAML file that describes a resource such as a model, a dataset, or a notebook. Note that those resources are described with a type-specific RDF. Use this generic resource description, if none of the known specific types matches your resource.

implemented_type: ClassVar[Literal['generic']] = 'generic'
id: Optional[Annotated[bioimageio.spec.generic.v0_3.ResourceId, FieldInfo(annotation=NoneType, required=True, examples=['affable-shark', 'ambitious-sloth'])]] = None

bioimage.io-wide unique resource identifier assigned by bioimage.io; version unspecific.

parent: Optional[bioimageio.spec.generic.v0_3.ResourceId] = None

The description from which this one is derived

source: Optional[bioimageio.spec._internal.url.HttpUrl] = None

The primary source of the resource

@field_validator('type', mode='after')
@classmethod
def check_specific_types(cls, value: str) -> str:
518    @field_validator("type", mode="after")
519    @classmethod
520    def check_specific_types(cls, value: str) -> str:
521        if value in KNOWN_SPECIFIC_RESOURCE_TYPES:
522            raise ValueError(
523                f"Use the {value} description instead of this generic description for"
524                + f" your '{value}' resource."
525            )
526
527        return value
implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = (0, 3, 0)
27def get_conda_env(
28    *,
29    entry: SupportedWeightsEntry,
30    env_name: Optional[Union[Literal["DROP"], str]] = None,
31) -> BioimageioCondaEnv:
32    """get the recommended Conda environment for a given weights entry description"""
33    if isinstance(entry, (v0_4.OnnxWeightsDescr, v0_5.OnnxWeightsDescr)):
34        conda_env = _get_default_onnx_env(opset_version=entry.opset_version)
35    elif isinstance(
36        entry,
37        (
38            v0_4.PytorchStateDictWeightsDescr,
39            v0_5.PytorchStateDictWeightsDescr,
40            v0_4.TorchscriptWeightsDescr,
41            v0_5.TorchscriptWeightsDescr,
42        ),
43    ):
44        if (
45            isinstance(entry, v0_5.TorchscriptWeightsDescr)
46            or entry.dependencies is None
47        ):
48            conda_env = _get_default_pytorch_env(pytorch_version=entry.pytorch_version)
49        else:
50            conda_env = _get_env_from_deps(entry.dependencies)
51
52    elif isinstance(
53        entry,
54        (
55            v0_4.TensorflowSavedModelBundleWeightsDescr,
56            v0_5.TensorflowSavedModelBundleWeightsDescr,
57        ),
58    ):
59        if entry.dependencies is None:
60            conda_env = _get_default_tf_env(tensorflow_version=entry.tensorflow_version)
61        else:
62            conda_env = _get_env_from_deps(entry.dependencies)
63    elif isinstance(
64        entry,
65        (v0_4.KerasHdf5WeightsDescr, v0_5.KerasHdf5WeightsDescr),
66    ):
67        conda_env = _get_default_tf_env(tensorflow_version=entry.tensorflow_version)
68    else:
69        assert_never(entry)
70
71    if env_name == "DROP":
72        conda_env.name = None
73    elif env_name is not None:
74        conda_env.name = env_name
75
76    return conda_env

get the recommended Conda environment for a given weights entry description
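
A sketch of requesting the recommended environment for a model's weights entry; it assumes the loaded model provides a pytorch_state_dict weights entry, and the source path is hypothetical:

from bioimageio.spec import get_conda_env, load_model_description

model = load_model_description("rdf.yaml")  # hypothetical source
entry = model.weights.pytorch_state_dict    # assumed to be present for this sketch
if entry is not None:
    env = get_conda_env(entry=entry, env_name="my-env")
    print(env.name)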

def get_resource_package_content(rd: ResourceDescr, /, *, bioimageio_yaml_file_name: str = 'rdf.yaml', weights_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None) -> Dict[FileName, Union[HttpUrl, AbsoluteFilePath, BioimageioYamlContent, ZipPath]]:
40def get_resource_package_content(
41    rd: ResourceDescr,
42    /,
43    *,
44    bioimageio_yaml_file_name: FileName = BIOIMAGEIO_YAML,
45    weights_priority_order: Optional[Sequence[WeightsFormat]] = None,  # model only
46) -> Dict[FileName, Union[HttpUrl, AbsoluteFilePath, BioimageioYamlContent, ZipPath]]:
47    ret: Dict[
48        FileName, Union[HttpUrl, AbsoluteFilePath, BioimageioYamlContent, ZipPath]
49    ] = {}
50    for k, v in get_package_content(
51        rd,
52        bioimageio_yaml_file_name=bioimageio_yaml_file_name,
53        weights_priority_order=weights_priority_order,
54    ).items():
55        if isinstance(v, FileDescr):
56            if isinstance(v.source, (Path, RelativeFilePath)):
57                ret[k] = v.source.absolute()
58            else:
59                ret[k] = v.source
60
61        else:
62            ret[k] = v
63
64    return ret
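
A sketch listing the files that would be included in a package for a loaded description (the source path is hypothetical):

from bioimageio.spec import get_resource_package_content, load_model_description

rd = load_model_description("rdf.yaml")  # hypothetical source
for file_name, src in get_resource_package_content(rd).items():
    print(file_name, "->", src)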
def get_validation_context( default: Optional[ValidationContext] = None) -> ValidationContext:
209def get_validation_context(
210    default: Optional[ValidationContext] = None,
211) -> ValidationContext:
212    """Get the currently active validation context (or a default)"""
213    return _validation_context_var.get() or default or ValidationContext()

Get the currently active validation context (or a default)
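
A sketch of reading the active context and deriving a modified copy with replace, as load_description does above:

from bioimageio.spec import get_validation_context

ctx = get_validation_context()
offline_ctx = ctx.replace(perform_io_checks=False)  # derived copy that skips IO checks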

396class InvalidDescr(
397    ResourceDescrBase,
398    extra="allow",
399    title="An invalid resource description",
400):
401    """A representation of an invalid resource description"""
402
403    implemented_type: ClassVar[Literal["unknown"]] = "unknown"
404    if TYPE_CHECKING:  # see NodeWithExplicitlySetFields
405        type: Any = "unknown"
406    else:
407        type: Any
408
409    implemented_format_version: ClassVar[Literal["unknown"]] = "unknown"
410    if TYPE_CHECKING:  # see NodeWithExplicitlySetFields
411        format_version: Any = "unknown"
412    else:
413        format_version: Any

A representation of an invalid resource description

implemented_type: ClassVar[Literal['unknown']] = 'unknown'
implemented_format_version: ClassVar[Literal['unknown']] = 'unknown'
implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = (0, 0, 0)
LatestResourceDescr = Union[Annotated[Union[ApplicationDescr, DatasetDescr, ModelDescr, NotebookDescr], Discriminator('type')], GenericDescr]
def load_dataset_description(source: Union[PermissiveFileSource, ZipFile], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', perform_io_checks: Optional[bool] = None, known_files: Optional[Dict[str, Optional[Sha256]]] = None, sha256: Optional[Sha256] = None) -> AnyDatasetDescr:
191def load_dataset_description(
192    source: Union[PermissiveFileSource, ZipFile],
193    /,
194    *,
195    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
196    perform_io_checks: Optional[bool] = None,
197    known_files: Optional[Dict[str, Optional[Sha256]]] = None,
198    sha256: Optional[Sha256] = None,
199) -> AnyDatasetDescr:
200    """same as `load_description`, but addtionally ensures that the loaded
201    description is valid and of type 'dataset'.
202    """
203    rd = load_description(
204        source,
205        format_version=format_version,
206        perform_io_checks=perform_io_checks,
207        known_files=known_files,
208        sha256=sha256,
209    )
210    return ensure_description_is_dataset(rd)

same as load_description, but additionally ensures that the loaded description is valid and of type 'dataset'.
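
A minimal sketch (the source path is hypothetical):

from bioimageio.spec import load_dataset_description

dataset = load_dataset_description("rdf.yaml", perform_io_checks=False)
print(dataset.id, dataset.name)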

def load_description_and_validate_format_only(source: Union[PermissiveFileSource, ZipFile], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', perform_io_checks: Optional[bool] = None, known_files: Optional[Dict[str, Optional[Sha256]]] = None, sha256: Optional[Sha256] = None) -> ValidationSummary:
243def load_description_and_validate_format_only(
244    source: Union[PermissiveFileSource, ZipFile],
245    /,
246    *,
247    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
248    perform_io_checks: Optional[bool] = None,
249    known_files: Optional[Dict[str, Optional[Sha256]]] = None,
250    sha256: Optional[Sha256] = None,
251) -> ValidationSummary:
252    """same as `load_description`, but only return the validation summary.
253
254    Returns:
255        Validation summary of the bioimage.io resource found at `source`.
256
257    """
258    rd = load_description(
259        source,
260        format_version=format_version,
261        perform_io_checks=perform_io_checks,
262        known_files=known_files,
263        sha256=sha256,
264    )
265    assert rd.validation_summary is not None
266    return rd.validation_summary

same as load_description, but only return the validation summary.

Returns:

Validation summary of the bioimage.io resource found at source.
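
A sketch of a format-only check (the source path is hypothetical):

from bioimageio.spec import load_description_and_validate_format_only

summary = load_description_and_validate_format_only("rdf.yaml")
print(summary)  # ValidationSummary of the resource at the given source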

def load_description(source: Union[PermissiveFileSource, ZipFile], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', perform_io_checks: Optional[bool] = None, known_files: Optional[Dict[str, Optional[Sha256]]] = None, sha256: Optional[Sha256] = None) -> Union[ResourceDescr, InvalidDescr]:
 57def load_description(
 58    source: Union[PermissiveFileSource, ZipFile],
 59    /,
 60    *,
 61    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
 62    perform_io_checks: Optional[bool] = None,
 63    known_files: Optional[Dict[str, Optional[Sha256]]] = None,
 64    sha256: Optional[Sha256] = None,
 65) -> Union[ResourceDescr, InvalidDescr]:
 66    """load a bioimage.io resource description
 67
 68    Args:
 69        source:
 70            Path or URL to an rdf.yaml or a bioimage.io package
 71            (zip-file with rdf.yaml in it).
 72        format_version:
 73            (optional) Use this argument to load the resource and
 74            convert its metadata to a higher format_version.
 75            Note:
 76            - Use "latest" to convert to the latest available format version.
 77            - Use "discover" to use the format version specified in the RDF.
 78            - Only considers major.minor format version, ignores patch version.
 79            - Conversion to lower format versions is not supported.
 80        perform_io_checks:
 81            Whether or not to perform validation that requires file I/O,
 82            e.g. downloading remote files. The existence of local
 83            absolute file paths is still being checked.
 84        known_files:
 85            Allows to bypass download and hashing of referenced files
 86            (even if perform_io_checks is True).
 87            Checked files will be added to this dictionary
 88            with their SHA-256 value.
 89        sha256:
 90            Optional SHA-256 value of **source**
 91
 92    Returns:
 93        An object holding all metadata of the bioimage.io resource
 94
 95    """
 96    if isinstance(source, ResourceDescrBase):
 97        name = getattr(source, "name", f"{str(source)[:10]}...")
 98        logger.warning("returning already loaded description '{}' as is", name)
 99        return source  # pyright: ignore[reportReturnType]
100
101    opened = open_bioimageio_yaml(source, sha256=sha256)
102
103    context = get_validation_context().replace(
104        root=opened.original_root,
105        file_name=opened.original_file_name,
106        original_source_name=opened.original_source_name,
107        perform_io_checks=perform_io_checks,
108        known_files=known_files,
109    )
110
111    return build_description(
112        opened.content,
113        context=context,
114        format_version=format_version,
115    )

load a bioimage.io resource description

Arguments:
  • source: Path or URL to an rdf.yaml or a bioimage.io package (zip-file with rdf.yaml in it).
  • format_version: (optional) Use this argument to load the resource and convert its metadata to a higher format_version. Note:
    • Use "latest" to convert to the latest available format version.
    • Use "discover" to use the format version specified in the RDF.
    • Only considers major.minor format version, ignores patch version.
    • Conversion to lower format versions is not supported.
  • perform_io_checks: Whether or not to perform validation that requires file I/O, e.g. downloading remote files. The existence of local absolute file paths is still being checked.
  • known_files: Allows to bypass download and hashing of referenced files (even if perform_io_checks is True). Checked files will be added to this dictionary with their SHA-256 value.
  • sha256: Optional SHA-256 value of source
Returns:

An object holding all metadata of the bioimage.io resource
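
A minimal sketch of loading and checking a resource description (the URL is hypothetical):

from bioimageio.spec import InvalidDescr, load_description

rd = load_description("https://example.com/rdf.yaml", format_version="latest")
if isinstance(rd, InvalidDescr):
    raise ValueError(f"invalid description: {rd.validation_summary}")
print(rd.type, rd.format_version)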

def load_model_description(source: Union[PermissiveFileSource, ZipFile], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', perform_io_checks: Optional[bool] = None, known_files: Optional[Dict[str, Optional[Sha256]]] = None, sha256: Optional[Sha256] = None) -> AnyModelDescr:
142def load_model_description(
143    source: Union[PermissiveFileSource, ZipFile],
144    /,
145    *,
146    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
147    perform_io_checks: Optional[bool] = None,
148    known_files: Optional[Dict[str, Optional[Sha256]]] = None,
149    sha256: Optional[Sha256] = None,
150) -> AnyModelDescr:
151    """same as `load_description`, but addtionally ensures that the loaded
152    description is valid and of type 'model'.
153
154    Raises:
155        ValueError: for invalid or non-model resources
156    """
157    rd = load_description(
158        source,
159        format_version=format_version,
160        perform_io_checks=perform_io_checks,
161        known_files=known_files,
162        sha256=sha256,
163    )
164    return ensure_description_is_model(rd)

same as load_description, but additionally ensures that the loaded description is valid and of type 'model'.

Raises:
  • ValueError: for invalid or non-model resources
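
A minimal sketch; 'affable-shark' follows the resource-id examples given above and is used here as a hypothetical source:

from bioimageio.spec import load_model_description

model = load_model_description("affable-shark")
print(model.name, model.format_version)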
2635class ModelDescr(GenericModelDescrBase):
2636    """Specification of the fields used in a bioimage.io-compliant RDF to describe AI models with pretrained weights.
2637    These fields are typically stored in a YAML file which we call a model resource description file (model RDF).
2638    """
2639
2640    implemented_format_version: ClassVar[Literal["0.5.6"]] = "0.5.6"
2641    if TYPE_CHECKING:
2642        format_version: Literal["0.5.6"] = "0.5.6"
2643    else:
2644        format_version: Literal["0.5.6"]
2645        """Version of the bioimage.io model description specification used.
2646        When creating a new model always use the latest micro/patch version described here.
2647        The `format_version` is important for any consumer software to understand how to parse the fields.
2648        """
2649
2650    implemented_type: ClassVar[Literal["model"]] = "model"
2651    if TYPE_CHECKING:
2652        type: Literal["model"] = "model"
2653    else:
2654        type: Literal["model"]
2655        """Specialized resource type 'model'"""
2656
2657    id: Optional[ModelId] = None
2658    """bioimage.io-wide unique resource identifier
2659    assigned by bioimage.io; version **un**specific."""
2660
2661    authors: FAIR[List[Author]] = Field(
2662        default_factory=cast(Callable[[], List[Author]], list)
2663    )
2664    """The authors are the creators of the model RDF and the primary points of contact."""
2665
2666    documentation: FAIR[Optional[FileSource_documentation]] = None
2667    """URL or relative path to a markdown file with additional documentation.
2668    The recommended documentation file name is `README.md`. An `.md` suffix is mandatory.
2669    The documentation should include a '#[#] Validation' (sub)section
2670    with details on how to quantitatively validate the model on unseen data."""
2671
2672    @field_validator("documentation", mode="after")
2673    @classmethod
2674    def _validate_documentation(
2675        cls, value: Optional[FileSource_documentation]
2676    ) -> Optional[FileSource_documentation]:
2677        if not get_validation_context().perform_io_checks or value is None:
2678            return value
2679
2680        doc_reader = get_reader(value)
2681        doc_content = doc_reader.read().decode(encoding="utf-8")
2682        if not re.search("#.*[vV]alidation", doc_content):
2683            issue_warning(
2684                "No '# Validation' (sub)section found in {value}.",
2685                value=value,
2686                field="documentation",
2687            )
2688
2689        return value
2690
2691    inputs: NotEmpty[Sequence[InputTensorDescr]]
2692    """Describes the input tensors expected by this model."""
2693
2694    @field_validator("inputs", mode="after")
2695    @classmethod
2696    def _validate_input_axes(
2697        cls, inputs: Sequence[InputTensorDescr]
2698    ) -> Sequence[InputTensorDescr]:
2699        input_size_refs = cls._get_axes_with_independent_size(inputs)
2700
2701        for i, ipt in enumerate(inputs):
2702            valid_independent_refs: Dict[
2703                Tuple[TensorId, AxisId],
2704                Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
2705            ] = {
2706                **{
2707                    (ipt.id, a.id): (ipt, a, a.size)
2708                    for a in ipt.axes
2709                    if not isinstance(a, BatchAxis)
2710                    and isinstance(a.size, (int, ParameterizedSize))
2711                },
2712                **input_size_refs,
2713            }
2714            for a, ax in enumerate(ipt.axes):
2715                cls._validate_axis(
2716                    "inputs",
2717                    i=i,
2718                    tensor_id=ipt.id,
2719                    a=a,
2720                    axis=ax,
2721                    valid_independent_refs=valid_independent_refs,
2722                )
2723        return inputs
2724
2725    @staticmethod
2726    def _validate_axis(
2727        field_name: str,
2728        i: int,
2729        tensor_id: TensorId,
2730        a: int,
2731        axis: AnyAxis,
2732        valid_independent_refs: Dict[
2733            Tuple[TensorId, AxisId],
2734            Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
2735        ],
2736    ):
2737        if isinstance(axis, BatchAxis) or isinstance(
2738            axis.size, (int, ParameterizedSize, DataDependentSize)
2739        ):
2740            return
2741        elif not isinstance(axis.size, SizeReference):
2742            assert_never(axis.size)
2743
2744        # validate axis.size SizeReference
2745        ref = (axis.size.tensor_id, axis.size.axis_id)
2746        if ref not in valid_independent_refs:
2747            raise ValueError(
2748                "Invalid tensor axis reference at"
2749                + f" {field_name}[{i}].axes[{a}].size: {axis.size}."
2750            )
2751        if ref == (tensor_id, axis.id):
2752            raise ValueError(
2753                "Self-referencing not allowed for"
2754                + f" {field_name}[{i}].axes[{a}].size: {axis.size}"
2755            )
2756        if axis.type == "channel":
2757            if valid_independent_refs[ref][1].type != "channel":
2758                raise ValueError(
2759                    "A channel axis' size may only reference another fixed size"
2760                    + " channel axis."
2761                )
2762            if isinstance(axis.channel_names, str) and "{i}" in axis.channel_names:
2763                ref_size = valid_independent_refs[ref][2]
2764                assert isinstance(ref_size, int), (
2765                    "channel axis ref (another channel axis) has to specify fixed"
2766                    + " size"
2767                )
2768                generated_channel_names = [
2769                    Identifier(axis.channel_names.format(i=i))
2770                    for i in range(1, ref_size + 1)
2771                ]
2772                axis.channel_names = generated_channel_names
2773
2774        if (ax_unit := getattr(axis, "unit", None)) != (
2775            ref_unit := getattr(valid_independent_refs[ref][1], "unit", None)
2776        ):
2777            raise ValueError(
2778                "The units of an axis and its reference axis need to match, but"
2779                + f" '{ax_unit}' != '{ref_unit}'."
2780            )
2781        ref_axis = valid_independent_refs[ref][1]
2782        if isinstance(ref_axis, BatchAxis):
2783            raise ValueError(
2784                f"Invalid reference axis '{ref_axis.id}' for {tensor_id}.{axis.id}"
2785                + " (a batch axis is not allowed as reference)."
2786            )
2787
2788        if isinstance(axis, WithHalo):
2789            min_size = axis.size.get_size(axis, ref_axis, n=0)
2790            if (min_size - 2 * axis.halo) < 1:
2791                raise ValueError(
2792                    f"axis {axis.id} with minimum size {min_size} is too small for halo"
2793                    + f" {axis.halo}."
2794                )
2795
2796            input_halo = axis.halo * axis.scale / ref_axis.scale
2797            if input_halo != int(input_halo) or input_halo % 2 == 1:
2798                raise ValueError(
2799                    f"input_halo {input_halo} (output_halo {axis.halo} *"
2800                    + f" output_scale {axis.scale} / input_scale {ref_axis.scale})"
2801                    + f"     {tensor_id}.{axis.id}."
2802                )
2803
2804    @model_validator(mode="after")
2805    def _validate_test_tensors(self) -> Self:
2806        if not get_validation_context().perform_io_checks:
2807            return self
2808
2809        test_output_arrays = [
2810            None if descr.test_tensor is None else load_array(descr.test_tensor)
2811            for descr in self.outputs
2812        ]
2813        test_input_arrays = [
2814            None if descr.test_tensor is None else load_array(descr.test_tensor)
2815            for descr in self.inputs
2816        ]
2817
2818        tensors = {
2819            descr.id: (descr, array)
2820            for descr, array in zip(
2821                chain(self.inputs, self.outputs), test_input_arrays + test_output_arrays
2822            )
2823        }
2824        validate_tensors(tensors, tensor_origin="test_tensor")
2825
2826        output_arrays = {
2827            descr.id: array for descr, array in zip(self.outputs, test_output_arrays)
2828        }
2829        for rep_tol in self.config.bioimageio.reproducibility_tolerance:
2830            if not rep_tol.absolute_tolerance:
2831                continue
2832
2833            if rep_tol.output_ids:
2834                out_arrays = {
2835                    oid: a
2836                    for oid, a in output_arrays.items()
2837                    if oid in rep_tol.output_ids
2838                }
2839            else:
2840                out_arrays = output_arrays
2841
2842            for out_id, array in out_arrays.items():
2843                if array is None:
2844                    continue
2845
2846                if rep_tol.absolute_tolerance > (max_test_value := array.max()) * 0.01:
2847                    raise ValueError(
2848                        "config.bioimageio.reproducibility_tolerance.absolute_tolerance="
2849                        + f"{rep_tol.absolute_tolerance} > 0.01*{max_test_value}"
2850                        + f" (1% of the maximum value of the test tensor '{out_id}')"
2851                    )
2852
2853        return self
2854
2855    @model_validator(mode="after")
2856    def _validate_tensor_references_in_proc_kwargs(self, info: ValidationInfo) -> Self:
2857        ipt_refs = {t.id for t in self.inputs}
2858        out_refs = {t.id for t in self.outputs}
2859        for ipt in self.inputs:
2860            for p in ipt.preprocessing:
2861                ref = p.kwargs.get("reference_tensor")
2862                if ref is None:
2863                    continue
2864                if ref not in ipt_refs:
2865                    raise ValueError(
2866                        f"`reference_tensor` '{ref}' not found. Valid input tensor"
2867                        + f" references are: {ipt_refs}."
2868                    )
2869
2870        for out in self.outputs:
2871            for p in out.postprocessing:
2872                ref = p.kwargs.get("reference_tensor")
2873                if ref is None:
2874                    continue
2875
2876                if ref not in ipt_refs and ref not in out_refs:
2877                    raise ValueError(
2878                        f"`reference_tensor` '{ref}' not found. Valid tensor references"
2879                        + f" are: {ipt_refs | out_refs}."
2880                    )
2881
2882        return self
2883
2884    # TODO: use validate funcs in validate_test_tensors
2885    # def validate_inputs(self, input_tensors: Mapping[TensorId, NDArray[Any]]) -> Mapping[TensorId, NDArray[Any]]:
2886
2887    name: Annotated[
2888        str,
2889        RestrictCharacters(string.ascii_letters + string.digits + "_+- ()"),
2890        MinLen(5),
2891        MaxLen(128),
2892        warn(MaxLen(64), "Name longer than 64 characters.", INFO),
2893    ]
2894    """A human-readable name of this model.
2895    It should be no longer than 64 characters
2896    and may only contain letters, numbers, underscores, minus signs, parentheses, and spaces.
2897    We recommend choosing a name that refers to the model's task and image modality.
2898    """
2899
2900    outputs: NotEmpty[Sequence[OutputTensorDescr]]
2901    """Describes the output tensors."""
2902
2903    @field_validator("outputs", mode="after")
2904    @classmethod
2905    def _validate_tensor_ids(
2906        cls, outputs: Sequence[OutputTensorDescr], info: ValidationInfo
2907    ) -> Sequence[OutputTensorDescr]:
2908        tensor_ids = [
2909            t.id for t in info.data.get("inputs", []) + info.data.get("outputs", [])
2910        ]
2911        duplicate_tensor_ids: List[str] = []
2912        seen: Set[str] = set()
2913        for t in tensor_ids:
2914            if t in seen:
2915                duplicate_tensor_ids.append(t)
2916
2917            seen.add(t)
2918
2919        if duplicate_tensor_ids:
2920            raise ValueError(f"Duplicate tensor ids: {duplicate_tensor_ids}")
2921
2922        return outputs
2923
2924    @staticmethod
2925    def _get_axes_with_parameterized_size(
2926        io: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
2927    ):
2928        return {
2929            f"{t.id}.{a.id}": (t, a, a.size)
2930            for t in io
2931            for a in t.axes
2932            if not isinstance(a, BatchAxis) and isinstance(a.size, ParameterizedSize)
2933        }
2934
2935    @staticmethod
2936    def _get_axes_with_independent_size(
2937        io: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
2938    ):
2939        return {
2940            (t.id, a.id): (t, a, a.size)
2941            for t in io
2942            for a in t.axes
2943            if not isinstance(a, BatchAxis)
2944            and isinstance(a.size, (int, ParameterizedSize))
2945        }
2946
2947    @field_validator("outputs", mode="after")
2948    @classmethod
2949    def _validate_output_axes(
2950        cls, outputs: List[OutputTensorDescr], info: ValidationInfo
2951    ) -> List[OutputTensorDescr]:
2952        input_size_refs = cls._get_axes_with_independent_size(
2953            info.data.get("inputs", [])
2954        )
2955        output_size_refs = cls._get_axes_with_independent_size(outputs)
2956
2957        for i, out in enumerate(outputs):
2958            valid_independent_refs: Dict[
2959                Tuple[TensorId, AxisId],
2960                Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
2961            ] = {
2962                **{
2963                    (out.id, a.id): (out, a, a.size)
2964                    for a in out.axes
2965                    if not isinstance(a, BatchAxis)
2966                    and isinstance(a.size, (int, ParameterizedSize))
2967                },
2968                **input_size_refs,
2969                **output_size_refs,
2970            }
2971            for a, ax in enumerate(out.axes):
2972                cls._validate_axis(
2973                    "outputs",
2974                    i,
2975                    out.id,
2976                    a,
2977                    ax,
2978                    valid_independent_refs=valid_independent_refs,
2979                )
2980
2981        return outputs
2982
2983    packaged_by: List[Author] = Field(
2984        default_factory=cast(Callable[[], List[Author]], list)
2985    )
2986    """The persons that have packaged and uploaded this model.
2987    Only required if those persons differ from the `authors`."""
2988
2989    parent: Optional[LinkedModel] = None
2990    """The model from which this model is derived, e.g. by fine-tuning the weights."""
2991
2992    @model_validator(mode="after")
2993    def _validate_parent_is_not_self(self) -> Self:
2994        if self.parent is not None and self.parent.id == self.id:
2995            raise ValueError("A model description may not reference itself as parent.")
2996
2997        return self
2998
2999    run_mode: Annotated[
3000        Optional[RunMode],
3001        warn(None, "Run mode '{value}' has limited support across consumer software."),
3002    ] = None
3003    """Custom run mode for this model: for more complex prediction procedures like test time
3004    data augmentation that currently cannot be expressed in the specification.
3005    No standard run modes are defined yet."""
3006
3007    timestamp: Datetime = Field(default_factory=Datetime.now)
3008    """Timestamp in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format
3009    with a few restrictions listed [here](https://docs.python.org/3/library/datetime.html#datetime.datetime.fromisoformat).
3010    (In Python a datetime object is valid, too)."""
3011
3012    training_data: Annotated[
3013        Union[None, LinkedDataset, DatasetDescr, DatasetDescr02],
3014        Field(union_mode="left_to_right"),
3015    ] = None
3016    """The dataset used to train this model"""
3017
3018    weights: Annotated[WeightsDescr, WrapSerializer(package_weights)]
3019    """The weights for this model.
3020    Weights can be given for different formats, but should otherwise be equivalent.
3021    The available weight formats determine which consumers can use this model."""
3022
3023    config: Config = Field(default_factory=Config.model_construct)
3024
3025    @model_validator(mode="after")
3026    def _add_default_cover(self) -> Self:
3027        if not get_validation_context().perform_io_checks or self.covers:
3028            return self
3029
3030        try:
3031            generated_covers = generate_covers(
3032                [
3033                    (t, load_array(t.test_tensor))
3034                    for t in self.inputs
3035                    if t.test_tensor is not None
3036                ],
3037                [
3038                    (t, load_array(t.test_tensor))
3039                    for t in self.outputs
3040                    if t.test_tensor is not None
3041                ],
3042            )
3043        except Exception as e:
3044            issue_warning(
3045                "Failed to generate cover image(s): {e}",
3046                value=self.covers,
3047                msg_context=dict(e=e),
3048                field="covers",
3049            )
3050        else:
3051            self.covers.extend(generated_covers)
3052
3053        return self
3054
3055    def get_input_test_arrays(self) -> List[NDArray[Any]]:
3056        return self._get_test_arrays(self.inputs)
3057
3058    def get_output_test_arrays(self) -> List[NDArray[Any]]:
3059        return self._get_test_arrays(self.outputs)
3060
3061    @staticmethod
3062    def _get_test_arrays(
3063        io_descr: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
3064    ):
3065        ts: List[FileDescr] = []
3066        for d in io_descr:
3067            if d.test_tensor is None:
3068                raise ValueError(
3069                    f"Failed to get test arrays: description of '{d.id}' is missing a `test_tensor`."
3070                )
3071            ts.append(d.test_tensor)
3072
3073        data = [load_array(t) for t in ts]
3074        assert all(isinstance(d, np.ndarray) for d in data)
3075        return data
3076
3077    @staticmethod
3078    def get_batch_size(tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> int:
3079        batch_size = 1
3080        tensor_with_batchsize: Optional[TensorId] = None
3081        for tid in tensor_sizes:
3082            for aid, s in tensor_sizes[tid].items():
3083                if aid != BATCH_AXIS_ID or s == 1 or s == batch_size:
3084                    continue
3085
3086                if batch_size != 1:
3087                    assert tensor_with_batchsize is not None
3088                    raise ValueError(
3089                        f"batch size mismatch for tensors '{tensor_with_batchsize}' ({batch_size}) and '{tid}' ({s})"
3090                    )
3091
3092                batch_size = s
3093                tensor_with_batchsize = tid
3094
3095        return batch_size
3096
3097    def get_output_tensor_sizes(
3098        self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]
3099    ) -> Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]:
3100        """Returns the tensor output sizes for given **input_sizes**.
3101        Only if **input_sizes** has a valid input shape, the tensor output size is exact.
3102        Otherwise it might be larger than the actual (valid) output"""
3103        batch_size = self.get_batch_size(input_sizes)
3104        ns = self.get_ns(input_sizes)
3105
3106        tensor_sizes = self.get_tensor_sizes(ns, batch_size=batch_size)
3107        return tensor_sizes.outputs
3108
3109    def get_ns(self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]):
3110        """get parameter `n` for each parameterized axis
3111        such that the valid input size is >= the given input size"""
3112        ret: Dict[Tuple[TensorId, AxisId], ParameterizedSize_N] = {}
3113        axes = {t.id: {a.id: a for a in t.axes} for t in self.inputs}
3114        for tid in input_sizes:
3115            for aid, s in input_sizes[tid].items():
3116                size_descr = axes[tid][aid].size
3117                if isinstance(size_descr, ParameterizedSize):
3118                    ret[(tid, aid)] = size_descr.get_n(s)
3119                elif size_descr is None or isinstance(size_descr, (int, SizeReference)):
3120                    pass
3121                else:
3122                    assert_never(size_descr)
3123
3124        return ret
3125
3126    def get_tensor_sizes(
3127        self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
3128    ) -> _TensorSizes:
3129        axis_sizes = self.get_axis_sizes(ns, batch_size=batch_size)
3130        return _TensorSizes(
3131            {
3132                t: {
3133                    aa: axis_sizes.inputs[(tt, aa)]
3134                    for tt, aa in axis_sizes.inputs
3135                    if tt == t
3136                }
3137                for t in {tt for tt, _ in axis_sizes.inputs}
3138            },
3139            {
3140                t: {
3141                    aa: axis_sizes.outputs[(tt, aa)]
3142                    for tt, aa in axis_sizes.outputs
3143                    if tt == t
3144                }
3145                for t in {tt for tt, _ in axis_sizes.outputs}
3146            },
3147        )
3148
3149    def get_axis_sizes(
3150        self,
3151        ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N],
3152        batch_size: Optional[int] = None,
3153        *,
3154        max_input_shape: Optional[Mapping[Tuple[TensorId, AxisId], int]] = None,
3155    ) -> _AxisSizes:
3156        """Determine input and output block shape for scale factors **ns**
3157        of parameterized input sizes.
3158
3159        Args:
3160            ns: Scale factor `n` for each axis (keyed by (tensor_id, axis_id))
3161                that is parameterized as `size = min + n * step`.
3162            batch_size: The desired size of the batch dimension.
3163                If given **batch_size** overwrites any batch size present in
3164                **max_input_shape**. Default 1.
3165            max_input_shape: Limits the derived block shapes.
3166                Each axis for which the input size, parameterized by `n`, is larger
3167                than **max_input_shape** is set to the minimal value `n_min` for which
3168                this is still true.
3169                Use this for small input samples or large values of **ns**.
3170                Or simply whenever you know the full input shape.
3171
3172        Returns:
3173            Resolved axis sizes for model inputs and outputs.
3174        """
3175        max_input_shape = max_input_shape or {}
3176        if batch_size is None:
3177            for (_t_id, a_id), s in max_input_shape.items():
3178                if a_id == BATCH_AXIS_ID:
3179                    batch_size = s
3180                    break
3181            else:
3182                batch_size = 1
3183
3184        all_axes = {
3185            t.id: {a.id: a for a in t.axes} for t in chain(self.inputs, self.outputs)
3186        }
3187
3188        inputs: Dict[Tuple[TensorId, AxisId], int] = {}
3189        outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]] = {}
3190
3191        def get_axis_size(a: Union[InputAxis, OutputAxis]):
3192            if isinstance(a, BatchAxis):
3193                if (t_descr.id, a.id) in ns:
3194                    logger.warning(
3195                        "Ignoring unexpected size increment factor (n) for batch axis"
3196                        + " of tensor '{}'.",
3197                        t_descr.id,
3198                    )
3199                return batch_size
3200            elif isinstance(a.size, int):
3201                if (t_descr.id, a.id) in ns:
3202                    logger.warning(
3203                        "Ignoring unexpected size increment factor (n) for fixed size"
3204                        + " axis '{}' of tensor '{}'.",
3205                        a.id,
3206                        t_descr.id,
3207                    )
3208                return a.size
3209            elif isinstance(a.size, ParameterizedSize):
3210                if (t_descr.id, a.id) not in ns:
3211                    raise ValueError(
3212                        "Size increment factor (n) missing for parametrized axis"
3213                        + f" '{a.id}' of tensor '{t_descr.id}'."
3214                    )
3215                n = ns[(t_descr.id, a.id)]
3216                s_max = max_input_shape.get((t_descr.id, a.id))
3217                if s_max is not None:
3218                    n = min(n, a.size.get_n(s_max))
3219
3220                return a.size.get_size(n)
3221
3222            elif isinstance(a.size, SizeReference):
3223                if (t_descr.id, a.id) in ns:
3224                    logger.warning(
3225                        "Ignoring unexpected size increment factor (n) for axis '{}'"
3226                        + " of tensor '{}' with size reference.",
3227                        a.id,
3228                        t_descr.id,
3229                    )
3230                assert not isinstance(a, BatchAxis)
3231                ref_axis = all_axes[a.size.tensor_id][a.size.axis_id]
3232                assert not isinstance(ref_axis, BatchAxis)
3233                ref_key = (a.size.tensor_id, a.size.axis_id)
3234                ref_size = inputs.get(ref_key, outputs.get(ref_key))
3235                assert ref_size is not None, ref_key
3236                assert not isinstance(ref_size, _DataDepSize), ref_key
3237                return a.size.get_size(
3238                    axis=a,
3239                    ref_axis=ref_axis,
3240                    ref_size=ref_size,
3241                )
3242            elif isinstance(a.size, DataDependentSize):
3243                if (t_descr.id, a.id) in ns:
3244                    logger.warning(
3245                        "Ignoring unexpected increment factor (n) for data dependent"
3246                        + " size axis '{}' of tensor '{}'.",
3247                        a.id,
3248                        t_descr.id,
3249                    )
3250                return _DataDepSize(a.size.min, a.size.max)
3251            else:
3252                assert_never(a.size)
3253
3254        # first resolve all but the `SizeReference` input sizes
3255        for t_descr in self.inputs:
3256            for a in t_descr.axes:
3257                if not isinstance(a.size, SizeReference):
3258                    s = get_axis_size(a)
3259                    assert not isinstance(s, _DataDepSize)
3260                    inputs[t_descr.id, a.id] = s
3261
3262        # resolve all other input axis sizes
3263        for t_descr in self.inputs:
3264            for a in t_descr.axes:
3265                if isinstance(a.size, SizeReference):
3266                    s = get_axis_size(a)
3267                    assert not isinstance(s, _DataDepSize)
3268                    inputs[t_descr.id, a.id] = s
3269
3270        # resolve all output axis sizes
3271        for t_descr in self.outputs:
3272            for a in t_descr.axes:
3273                assert not isinstance(a.size, ParameterizedSize)
3274                s = get_axis_size(a)
3275                outputs[t_descr.id, a.id] = s
3276
3277        return _AxisSizes(inputs=inputs, outputs=outputs)
3278
3279    @model_validator(mode="before")
3280    @classmethod
3281    def _convert(cls, data: Dict[str, Any]) -> Dict[str, Any]:
3282        cls.convert_from_old_format_wo_validation(data)
3283        return data
3284
3285    @classmethod
3286    def convert_from_old_format_wo_validation(cls, data: Dict[str, Any]) -> None:
3287        """Convert metadata following an older format version to this class's format
3288        without validating the result.
3289        """
3290        if (
3291            data.get("type") == "model"
3292            and isinstance(fv := data.get("format_version"), str)
3293            and fv.count(".") == 2
3294        ):
3295            fv_parts = fv.split(".")
3296            if any(not p.isdigit() for p in fv_parts):
3297                return
3298
3299            fv_tuple = tuple(map(int, fv_parts))
3300
3301            assert cls.implemented_format_version_tuple[0:2] == (0, 5)
3302            if fv_tuple[:2] in ((0, 3), (0, 4)):
3303                m04 = _ModelDescr_v0_4.load(data)
3304                if isinstance(m04, InvalidDescr):
3305                    try:
3306                        updated = _model_conv.convert_as_dict(
3307                            m04  # pyright: ignore[reportArgumentType]
3308                        )
3309                    except Exception as e:
3310                        logger.error(
3311                            "Failed to convert from invalid model 0.4 description."
3312                            + f"\nerror: {e}"
3313                            + "\nProceeding with model 0.5 validation without conversion."
3314                        )
3315                        updated = None
3316                else:
3317                    updated = _model_conv.convert_as_dict(m04)
3318
3319                if updated is not None:
3320                    data.clear()
3321                    data.update(updated)
3322
3323            elif fv_tuple[:2] == (0, 5):
3324                # bump patch version
3325                data["format_version"] = cls.implemented_format_version

Specification of the fields used in a bioimage.io-compliant RDF to describe AI models with pretrained weights. These fields are typically stored in a YAML file which we call a model resource description file (model RDF).
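
For orientation, a minimal sketch of loading and checking such a model RDF with this package; the source path is hypothetical (any local path, URL or bioimage.io ID could be used):

    from bioimageio.spec import InvalidDescr, load_description

    descr = load_description("path/to/my_model/rdf.yaml")  # hypothetical source
    if isinstance(descr, InvalidDescr):
        descr.validation_summary.display()  # inspect validation problems
    else:
        print(descr.name, type(descr).__name__)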

implemented_format_version: ClassVar[Literal['0.5.6']] = '0.5.6'
implemented_type: ClassVar[Literal['model']] = 'model'
id: Optional[bioimageio.spec.model.v0_5.ModelId] = None

bioimage.io-wide unique resource identifier assigned by bioimage.io; version unspecific.

authors: Annotated[List[bioimageio.spec.generic.v0_3.Author], AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7efc006a6ac0>, severity=35, msg=None, context=None)] = PydanticUndefined

The authors are the creators of the model RDF and the primary points of contact.

documentation: Annotated[Optional[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7efc0116c900>), PlainSerializer(func=<function _package_serializer at 0x7efbf1b27920>, return_type=PydanticUndefined, when_used='unless-none'), WithSuffix(suffix='.md', case_sensitive=True), FieldInfo(annotation=NoneType, required=True, examples=['https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md', 'README.md'])]], AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7efc006a6ac0>, severity=35, msg=None, context=None)] = None

URL or relative path to a markdown file with additional documentation. The recommended documentation file name is README.md. An .md suffix is mandatory. The documentation should include a '#[#] Validation' (sub)section with details on how to quantitatively validate the model on unseen data.

inputs: Annotated[Sequence[bioimageio.spec.model.v0_5.InputTensorDescr], MinLen(min_length=1)] = PydanticUndefined

Describes the input tensors expected by this model.

name: Annotated[str, RestrictCharacters(alphabet='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_+- ()'), MinLen(min_length=5), MaxLen(max_length=128), AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7efbf01b5260>, severity=20, msg='Name longer than 64 characters.', context={'typ': Annotated[Any, MaxLen(max_length=64)]})] = PydanticUndefined

A human-readable name of this model. It should be no longer than 64 characters and may only contain letters, numbers, underscores, minus signs, parentheses and spaces. We recommend choosing a name that refers to the model's task and image modality.

outputs: Annotated[Sequence[bioimageio.spec.model.v0_5.OutputTensorDescr], MinLen(min_length=1)] = PydanticUndefined

Describes the output tensors.

packaged_by: List[bioimageio.spec.generic.v0_3.Author] = PydanticUndefined

The persons that have packaged and uploaded this model. Only required if those persons differ from the authors.

parent: Optional[bioimageio.spec.model.v0_5.LinkedModel] = None

The model from which this model is derived, e.g. by fine-tuning the weights.

run_mode: Annotated[Optional[bioimageio.spec.model.v0_4.RunMode], AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7efbf01b51c0>, severity=30, msg="Run mode '{value}' has limited support across consumer softwares.", context={'typ': None})] = None

Custom run mode for this model: for more complex prediction procedures like test time data augmentation that currently cannot be expressed in the specification. No standard run modes are defined yet.

timestamp: bioimageio.spec._internal.types.Datetime = PydanticUndefined

Timestamp in ISO 8601 format with a few restrictions listed at https://docs.python.org/3/library/datetime.html#datetime.datetime.fromisoformat. (In Python a datetime object is valid, too).

training_data: Annotated[Union[NoneType, bioimageio.spec.dataset.v0_3.LinkedDataset, DatasetDescr, bioimageio.spec.dataset.v0_2.DatasetDescr], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])] = None

The dataset used to train this model

weights: Annotated[bioimageio.spec.model.v0_5.WeightsDescr, WrapSerializer(func=<function package_weights at 0x7efbf19c6200>, return_type=PydanticUndefined, when_used='always')] = PydanticUndefined

The weights for this model. Weights can be given for different formats, but should otherwise be equivalent. The available weight formats determine which consumers can use this model.

config: bioimageio.spec.model.v0_5.Config = PydanticUndefined
def get_input_test_arrays(self) -> List[numpy.ndarray[tuple[Any, ...], numpy.dtype[Any]]]:
3055    def get_input_test_arrays(self) -> List[NDArray[Any]]:
3056        return self._get_test_arrays(self.inputs)
def get_output_test_arrays(self) -> List[numpy.ndarray[tuple[Any, ...], numpy.dtype[Any]]]:
3058    def get_output_test_arrays(self) -> List[NDArray[Any]]:
3059        return self._get_test_arrays(self.outputs)
@staticmethod
def get_batch_size( tensor_sizes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]]) -> int:
3077    @staticmethod
3078    def get_batch_size(tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> int:
3079        batch_size = 1
3080        tensor_with_batchsize: Optional[TensorId] = None
3081        for tid in tensor_sizes:
3082            for aid, s in tensor_sizes[tid].items():
3083                if aid != BATCH_AXIS_ID or s == 1 or s == batch_size:
3084                    continue
3085
3086                if batch_size != 1:
3087                    assert tensor_with_batchsize is not None
3088                    raise ValueError(
3089                        f"batch size mismatch for tensors '{tensor_with_batchsize}' ({batch_size}) and '{tid}' ({s})"
3090                    )
3091
3092                batch_size = s
3093                tensor_with_batchsize = tid
3094
3095        return batch_size
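
For illustration, a hypothetical call, assuming the batch axis id is `batch` (which is what `BATCH_AXIS_ID` denotes) and two tensors that agree on a batch size of 2:

    from bioimageio.spec.model.v0_5 import AxisId, ModelDescr, TensorId

    # hypothetical tensor/axis ids
    batch_size = ModelDescr.get_batch_size({
        TensorId("raw"): {AxisId("batch"): 2, AxisId("y"): 256, AxisId("x"): 256},
        TensorId("mask"): {AxisId("batch"): 2, AxisId("y"): 256, AxisId("x"): 256},
    })
    assert batch_size == 2
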
def get_output_tensor_sizes( self, input_sizes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]]) -> Dict[bioimageio.spec.model.v0_5.TensorId, Dict[bioimageio.spec.model.v0_5.AxisId, Union[int, bioimageio.spec.model.v0_5._DataDepSize]]]:
3097    def get_output_tensor_sizes(
3098        self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]
3099    ) -> Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]:
3100        """Returns the tensor output sizes for given **input_sizes**.
3101        Only if **input_sizes** has a valid input shape, the tensor output size is exact.
3102        Otherwise it might be larger than the actual (valid) output"""
3103        batch_size = self.get_batch_size(input_sizes)
3104        ns = self.get_ns(input_sizes)
3105
3106        tensor_sizes = self.get_tensor_sizes(ns, batch_size=batch_size)
3107        return tensor_sizes.outputs

Returns the tensor output sizes for given input_sizes. The output sizes are exact only if input_sizes describes a valid input shape; otherwise they may be larger than the actual (valid) output sizes.
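
A hedged usage sketch, where `model` stands for a previously loaded `ModelDescr` and the tensor/axis ids are hypothetical:

    from bioimageio.spec.model.v0_5 import AxisId, TensorId

    out_sizes = model.get_output_tensor_sizes(
        {TensorId("raw"): {AxisId("batch"): 1, AxisId("y"): 512, AxisId("x"): 512}}
    )
    # e.g. {TensorId("mask"): {AxisId("batch"): 1, AxisId("y"): 512, AxisId("x"): 512}}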

def get_ns( self, input_sizes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]]):
3109    def get_ns(self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]):
3110        """get parameter `n` for each parameterized axis
3111        such that the valid input size is >= the given input size"""
3112        ret: Dict[Tuple[TensorId, AxisId], ParameterizedSize_N] = {}
3113        axes = {t.id: {a.id: a for a in t.axes} for t in self.inputs}
3114        for tid in input_sizes:
3115            for aid, s in input_sizes[tid].items():
3116                size_descr = axes[tid][aid].size
3117                if isinstance(size_descr, ParameterizedSize):
3118                    ret[(tid, aid)] = size_descr.get_n(s)
3119                elif size_descr is None or isinstance(size_descr, (int, SizeReference)):
3120                    pass
3121                else:
3122                    assert_never(size_descr)
3123
3124        return ret

get parameter n for each parameterized axis such that the valid input size is >= the given input size
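
A hedged sketch, again with a hypothetical loaded `model` and hypothetical ids; for each parameterized axis (`size = min + n*step`) the returned `n` yields a valid input size that is >= the requested size:

    from bioimageio.spec.model.v0_5 import AxisId, TensorId

    ns = model.get_ns({TensorId("raw"): {AxisId("y"): 500, AxisId("x"): 500}})
    # e.g. {(TensorId("raw"), AxisId("y")): 28, ...} for an axis with min=64, step=16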

def get_tensor_sizes( self, ns: Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int], batch_size: int) -> bioimageio.spec.model.v0_5._TensorSizes:
3126    def get_tensor_sizes(
3127        self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
3128    ) -> _TensorSizes:
3129        axis_sizes = self.get_axis_sizes(ns, batch_size=batch_size)
3130        return _TensorSizes(
3131            {
3132                t: {
3133                    aa: axis_sizes.inputs[(tt, aa)]
3134                    for tt, aa in axis_sizes.inputs
3135                    if tt == t
3136                }
3137                for t in {tt for tt, _ in axis_sizes.inputs}
3138            },
3139            {
3140                t: {
3141                    aa: axis_sizes.outputs[(tt, aa)]
3142                    for tt, aa in axis_sizes.outputs
3143                    if tt == t
3144                }
3145                for t in {tt for tt, _ in axis_sizes.outputs}
3146            },
3147        )
def get_axis_sizes( self, ns: Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int], batch_size: Optional[int] = None, *, max_input_shape: Optional[Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int]] = None) -> bioimageio.spec.model.v0_5._AxisSizes:
3149    def get_axis_sizes(
3150        self,
3151        ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N],
3152        batch_size: Optional[int] = None,
3153        *,
3154        max_input_shape: Optional[Mapping[Tuple[TensorId, AxisId], int]] = None,
3155    ) -> _AxisSizes:
3156        """Determine input and output block shape for scale factors **ns**
3157        of parameterized input sizes.
3158
3159        Args:
3160            ns: Scale factor `n` for each axis (keyed by (tensor_id, axis_id))
3161                that is parameterized as `size = min + n * step`.
3162            batch_size: The desired size of the batch dimension.
3163                If given **batch_size** overwrites any batch size present in
3164                **max_input_shape**. Default 1.
3165            max_input_shape: Limits the derived block shapes.
3166                Each axis for which the input size, parameterized by `n`, is larger
3167                than **max_input_shape** is set to the minimal value `n_min` for which
3168                this is still true.
3169                Use this for small input samples or large values of **ns**.
3170                Or simply whenever you know the full input shape.
3171
3172        Returns:
3173            Resolved axis sizes for model inputs and outputs.
3174        """
3175        max_input_shape = max_input_shape or {}
3176        if batch_size is None:
3177            for (_t_id, a_id), s in max_input_shape.items():
3178                if a_id == BATCH_AXIS_ID:
3179                    batch_size = s
3180                    break
3181            else:
3182                batch_size = 1
3183
3184        all_axes = {
3185            t.id: {a.id: a for a in t.axes} for t in chain(self.inputs, self.outputs)
3186        }
3187
3188        inputs: Dict[Tuple[TensorId, AxisId], int] = {}
3189        outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]] = {}
3190
3191        def get_axis_size(a: Union[InputAxis, OutputAxis]):
3192            if isinstance(a, BatchAxis):
3193                if (t_descr.id, a.id) in ns:
3194                    logger.warning(
3195                        "Ignoring unexpected size increment factor (n) for batch axis"
3196                        + " of tensor '{}'.",
3197                        t_descr.id,
3198                    )
3199                return batch_size
3200            elif isinstance(a.size, int):
3201                if (t_descr.id, a.id) in ns:
3202                    logger.warning(
3203                        "Ignoring unexpected size increment factor (n) for fixed size"
3204                        + " axis '{}' of tensor '{}'.",
3205                        a.id,
3206                        t_descr.id,
3207                    )
3208                return a.size
3209            elif isinstance(a.size, ParameterizedSize):
3210                if (t_descr.id, a.id) not in ns:
3211                    raise ValueError(
3212                        "Size increment factor (n) missing for parametrized axis"
3213                        + f" '{a.id}' of tensor '{t_descr.id}'."
3214                    )
3215                n = ns[(t_descr.id, a.id)]
3216                s_max = max_input_shape.get((t_descr.id, a.id))
3217                if s_max is not None:
3218                    n = min(n, a.size.get_n(s_max))
3219
3220                return a.size.get_size(n)
3221
3222            elif isinstance(a.size, SizeReference):
3223                if (t_descr.id, a.id) in ns:
3224                    logger.warning(
3225                        "Ignoring unexpected size increment factor (n) for axis '{}'"
3226                        + " of tensor '{}' with size reference.",
3227                        a.id,
3228                        t_descr.id,
3229                    )
3230                assert not isinstance(a, BatchAxis)
3231                ref_axis = all_axes[a.size.tensor_id][a.size.axis_id]
3232                assert not isinstance(ref_axis, BatchAxis)
3233                ref_key = (a.size.tensor_id, a.size.axis_id)
3234                ref_size = inputs.get(ref_key, outputs.get(ref_key))
3235                assert ref_size is not None, ref_key
3236                assert not isinstance(ref_size, _DataDepSize), ref_key
3237                return a.size.get_size(
3238                    axis=a,
3239                    ref_axis=ref_axis,
3240                    ref_size=ref_size,
3241                )
3242            elif isinstance(a.size, DataDependentSize):
3243                if (t_descr.id, a.id) in ns:
3244                    logger.warning(
3245                        "Ignoring unexpected increment factor (n) for data dependent"
3246                        + " size axis '{}' of tensor '{}'.",
3247                        a.id,
3248                        t_descr.id,
3249                    )
3250                return _DataDepSize(a.size.min, a.size.max)
3251            else:
3252                assert_never(a.size)
3253
3254        # first resolve all but the `SizeReference` input sizes
3255        for t_descr in self.inputs:
3256            for a in t_descr.axes:
3257                if not isinstance(a.size, SizeReference):
3258                    s = get_axis_size(a)
3259                    assert not isinstance(s, _DataDepSize)
3260                    inputs[t_descr.id, a.id] = s
3261
3262        # resolve all other input axis sizes
3263        for t_descr in self.inputs:
3264            for a in t_descr.axes:
3265                if isinstance(a.size, SizeReference):
3266                    s = get_axis_size(a)
3267                    assert not isinstance(s, _DataDepSize)
3268                    inputs[t_descr.id, a.id] = s
3269
3270        # resolve all output axis sizes
3271        for t_descr in self.outputs:
3272            for a in t_descr.axes:
3273                assert not isinstance(a.size, ParameterizedSize)
3274                s = get_axis_size(a)
3275                outputs[t_descr.id, a.id] = s
3276
3277        return _AxisSizes(inputs=inputs, outputs=outputs)

Determine input and output block shape for scale factors ns of parameterized input sizes.

Arguments:
  • ns: Scale factor n for each axis (keyed by (tensor_id, axis_id)) that is parameterized as size = min + n * step.
  • batch_size: The desired size of the batch dimension. If given, batch_size overwrites any batch size present in max_input_shape. Default 1.
  • max_input_shape: Limits the derived block shapes. Each axis for which the input size, parameterized by n, is larger than max_input_shape is set to the minimal value n_min for which this is still true. Use this for small input samples or large values of ns. Or simply whenever you know the full input shape.
Returns:

Resolved axis sizes for model inputs and outputs.
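
A hedged sketch with a hypothetical loaded `model` and hypothetical ids and scale factors:

    from bioimageio.spec.model.v0_5 import AxisId, TensorId

    axis_sizes = model.get_axis_sizes(
        ns={(TensorId("raw"), AxisId("y")): 4, (TensorId("raw"), AxisId("x")): 4},
        batch_size=1,
    )
    print(axis_sizes.inputs)   # {(tensor_id, axis_id): size, ...}
    print(axis_sizes.outputs)  # entries may be _DataDepSize for data dependent axes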

@classmethod
def convert_from_old_format_wo_validation(cls, data: Dict[str, Any]) -> None:
3285    @classmethod
3286    def convert_from_old_format_wo_validation(cls, data: Dict[str, Any]) -> None:
3287        """Convert metadata following an older format version to this class's format
3288        without validating the result.
3289        """
3290        if (
3291            data.get("type") == "model"
3292            and isinstance(fv := data.get("format_version"), str)
3293            and fv.count(".") == 2
3294        ):
3295            fv_parts = fv.split(".")
3296            if any(not p.isdigit() for p in fv_parts):
3297                return
3298
3299            fv_tuple = tuple(map(int, fv_parts))
3300
3301            assert cls.implemented_format_version_tuple[0:2] == (0, 5)
3302            if fv_tuple[:2] in ((0, 3), (0, 4)):
3303                m04 = _ModelDescr_v0_4.load(data)
3304                if isinstance(m04, InvalidDescr):
3305                    try:
3306                        updated = _model_conv.convert_as_dict(
3307                            m04  # pyright: ignore[reportArgumentType]
3308                        )
3309                    except Exception as e:
3310                        logger.error(
3311                            "Failed to convert from invalid model 0.4 description."
3312                            + f"\nerror: {e}"
3313                            + "\nProceeding with model 0.5 validation without conversion."
3314                        )
3315                        updated = None
3316                else:
3317                    updated = _model_conv.convert_as_dict(m04)
3318
3319                if updated is not None:
3320                    data.clear()
3321                    data.update(updated)
3322
3323            elif fv_tuple[:2] == (0, 5):
3324                # bump patch version
3325                data["format_version"] = cls.implemented_format_version

Convert metadata following an older format version to this class's format without validating the result.
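
A hedged sketch; the file path is hypothetical and PyYAML is only one way to obtain the raw RDF dict:

    import yaml  # PyYAML, assumed to be available

    from bioimageio.spec import build_description
    from bioimageio.spec.model.v0_5 import ModelDescr

    with open("old_model/rdf.yaml") as f:  # hypothetical path to a model 0.4 RDF
        data = yaml.safe_load(f)

    ModelDescr.convert_from_old_format_wo_validation(data)  # updates `data` in place
    descr = build_description(data)  # validation happens separately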

implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = (0, 5, 6)
class NotebookDescr(bioimageio.spec.generic.v0_3.GenericDescrBase):
31class NotebookDescr(GenericDescrBase):
32    """Bioimage.io description of a Jupyter notebook."""
33
34    implemented_type: ClassVar[Literal["notebook"]] = "notebook"
35    if TYPE_CHECKING:
36        type: Literal["notebook"] = "notebook"
37    else:
38        type: Literal["notebook"]
39
40    id: Optional[NotebookId] = None
41    """bioimage.io-wide unique resource identifier
42    assigned by bioimage.io; version **un**specific."""
43
44    parent: Optional[NotebookId] = None
45    """The description from which this one is derived"""
46
47    source: NotebookSource
48    """The Jupyter notebook"""

Bioimage.io description of a Jupyter notebook.

implemented_type: ClassVar[Literal['notebook']] = 'notebook'
id: Optional[bioimageio.spec.notebook.v0_3.NotebookId] = None

bioimage.io-wide unique resource identifier assigned by bioimage.io; version unspecific.

parent: Optional[bioimageio.spec.notebook.v0_3.NotebookId] = None

The description from which this one is derived

source: Union[Annotated[bioimageio.spec._internal.url.HttpUrl, WithSuffix(suffix='.ipynb', case_sensitive=True)], Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath'), WithSuffix(suffix='.ipynb', case_sensitive=True)], Annotated[bioimageio.spec._internal.io.RelativeFilePath, WithSuffix(suffix='.ipynb', case_sensitive=True)]] = PydanticUndefined

The Jupyter notebook

implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = (0, 3, 0)
ResourceDescr = typing.Union[typing.Annotated[typing.Union[typing.Annotated[typing.Union[typing.Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], typing.Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], typing.Annotated[typing.Union[typing.Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], typing.Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], typing.Annotated[typing.Union[typing.Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], typing.Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], typing.Annotated[typing.Union[typing.Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], typing.Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[typing.Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], typing.Annotated[GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')]]
def save_bioimageio_package_as_folder( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, pydantic.networks.HttpUrl, zipfile.ZipFile, Dict[str, YamlValue], Mapping[str, YamlValueView], Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')]], /, *, output_path: Union[Annotated[pathlib.Path, PathType(path_type='new')], Annotated[pathlib.Path, PathType(path_type='dir')], NoneType] = None, weights_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None) -> Annotated[pathlib.Path, PathType(path_type='dir')]:
150def save_bioimageio_package_as_folder(
151    source: Union[BioimageioYamlSource, ResourceDescr],
152    /,
153    *,
154    output_path: Union[NewPath, DirectoryPath, None] = None,
155    weights_priority_order: Optional[  # model only
156        Sequence[
157            Literal[
158                "keras_hdf5",
159                "onnx",
160                "pytorch_state_dict",
161                "tensorflow_js",
162                "tensorflow_saved_model_bundle",
163                "torchscript",
164            ]
165        ]
166    ] = None,
167) -> DirectoryPath:
168    """Write the content of a bioimage.io resource package to a folder.
169
170    Args:
171        source: bioimageio resource description
172        output_path: file path to write package to
173        weights_priority_order: If given only the first weights format present in the model is included.
174                                If none of the prioritized weights formats is found all are included.
175
176    Returns:
177        directory path to bioimageio package folder
178    """
179    package_content = _prepare_resource_package(
180        source,
181        weights_priority_order=weights_priority_order,
182    )
183    if output_path is None:
184        output_path = Path(mkdtemp())
185    else:
186        output_path = Path(output_path)
187
188    output_path.mkdir(exist_ok=True, parents=True)
189    for name, src in package_content.items():
190        if isinstance(src, collections.abc.Mapping):
191            write_yaml(src, output_path / name)
192        elif (
193            isinstance(src.original_root, Path)
194            and src.original_root / src.original_file_name
195            == (output_path / name).resolve()
196        ):
197            logger.debug(
198                f"Not copying {src.original_root / src.original_file_name} to itself."
199            )
200        else:
201            if isinstance(src.original_root, Path):
202                logger.debug(
203                    f"Copying from path {src.original_root / src.original_file_name} to {output_path / name}."
204                )
205            else:
206                logger.debug(
207                    f"Copying {src.original_root}/{src.original_file_name} to {output_path / name}."
208                )
209            with (output_path / name).open("wb") as dest:
210                _ = shutil.copyfileobj(src, dest)
211
212    return output_path

Write the content of a bioimage.io resource package to a folder.

Arguments:
  • source: bioimageio resource description
  • output_path: file path to write package to
  • weights_priority_order: If given, only the first weights format present in the model is included. If none of the prioritized weights formats is found, all are included.
Returns:

directory path to bioimageio package folder
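
A hedged usage sketch; the source URL and output folder are hypothetical:

    from pathlib import Path

    from bioimageio.spec import save_bioimageio_package_as_folder

    folder = save_bioimageio_package_as_folder(
        "https://example.com/my_model/rdf.yaml",  # hypothetical source
        output_path=Path("unpacked_model"),
    )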

def save_bioimageio_package_to_stream( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, pydantic.networks.HttpUrl, zipfile.ZipFile, Dict[str, YamlValue], Mapping[str, YamlValueView], Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')]], /, *, compression: int = 8, compression_level: int = 1, output_stream: Optional[IO[bytes]] = None, weights_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None) -> IO[bytes]:
279def save_bioimageio_package_to_stream(
280    source: Union[BioimageioYamlSource, ResourceDescr],
281    /,
282    *,
283    compression: int = ZIP_DEFLATED,
284    compression_level: int = 1,
285    output_stream: Union[IO[bytes], None] = None,
286    weights_priority_order: Optional[  # model only
287        Sequence[
288            Literal[
289                "keras_hdf5",
290                "onnx",
291                "pytorch_state_dict",
292                "tensorflow_js",
293                "tensorflow_saved_model_bundle",
294                "torchscript",
295            ]
296        ]
297    ] = None,
298) -> IO[bytes]:
299    """Package a bioimageio resource into a stream.
300
301    Args:
302        source: bioimageio resource description
303        compression: The numeric constant of compression method.
304        compression_level: Compression level to use when writing files to the archive.
305                           See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile
306        output_stream: stream to write package to
307        weights_priority_order: If given only the first weights format present in the model is included.
308                                If none of the prioritized weights formats is found all are included.
309
310    Note: this function bypasses safety checks and does not load/validate the model after writing.
311
312    Returns:
313        stream of zipped bioimageio package
314    """
315    if output_stream is None:
316        output_stream = BytesIO()
317
318    package_content = _prepare_resource_package(
319        source,
320        weights_priority_order=weights_priority_order,
321    )
322
323    write_zip(
324        output_stream,
325        package_content,
326        compression=compression,
327        compression_level=compression_level,
328    )
329
330    return output_stream

Package a bioimageio resource into a stream.

Arguments:
  • source: bioimageio resource description
  • compression: The numeric constant of compression method.
  • compression_level: Compression level to use when writing files to the archive. See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile
  • output_stream: stream to write package to
  • weights_priority_order: If given, only the first weights format present in the model is included. If none of the prioritized weights formats is found, all are included.

Note: this function bypasses safety checks and does not load/validate the model after writing.

Returns:

stream of zipped bioimageio package
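
A hedged usage sketch writing the package to an in-memory stream; the source URL is hypothetical:

    from io import BytesIO

    from bioimageio.spec import save_bioimageio_package_to_stream

    buffer = BytesIO()
    _ = save_bioimageio_package_to_stream(
        "https://example.com/my_model/rdf.yaml",  # hypothetical source
        output_stream=buffer,
    )
    zipped_bytes = buffer.getvalue()  # e.g. to upload without writing to disk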

def save_bioimageio_package( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, pydantic.networks.HttpUrl, zipfile.ZipFile, Dict[str, YamlValue], Mapping[str, YamlValueView], Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')]], /, *, compression: int = 8, compression_level: int = 1, output_path: Union[Annotated[pathlib.Path, PathType(path_type='new')], Annotated[pathlib.Path, PathType(path_type='file')], NoneType] = None, weights_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None, allow_invalid: bool = False) -> Annotated[pathlib.Path, PathType(path_type='file')]:
215def save_bioimageio_package(
216    source: Union[BioimageioYamlSource, ResourceDescr],
217    /,
218    *,
219    compression: int = ZIP_DEFLATED,
220    compression_level: int = 1,
221    output_path: Union[NewPath, FilePath, None] = None,
222    weights_priority_order: Optional[  # model only
223        Sequence[
224            Literal[
225                "keras_hdf5",
226                "onnx",
227                "pytorch_state_dict",
228                "tensorflow_js",
229                "tensorflow_saved_model_bundle",
230                "torchscript",
231            ]
232        ]
233    ] = None,
234    allow_invalid: bool = False,
235) -> FilePath:
236    """Package a bioimageio resource as a zip file.
237
238    Args:
239        source: bioimageio resource description
240        compression: The numeric constant of compression method.
241        compression_level: Compression level to use when writing files to the archive.
242                           See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile
243        output_path: file path to write package to
244        weights_priority_order: If given only the first weights format present in the model is included.
245                                If none of the prioritized weights formats is found all are included.
246
247    Returns:
248        path to zipped bioimageio package
249    """
250    package_content = _prepare_resource_package(
251        source,
252        weights_priority_order=weights_priority_order,
253    )
254    if output_path is None:
255        output_path = Path(
256            NamedTemporaryFile(suffix=".bioimageio.zip", delete=False).name
257        )
258    else:
259        output_path = Path(output_path)
260
261    write_zip(
262        output_path,
263        package_content,
264        compression=compression,
265        compression_level=compression_level,
266    )
267    with get_validation_context().replace(warning_level=ERROR):
268        if isinstance((exported := load_description(output_path)), InvalidDescr):
269            exported.validation_summary.display()
270            msg = f"Exported package at '{output_path}' is invalid."
271            if allow_invalid:
272                logger.error(msg)
273            else:
274                raise ValueError(msg)
275
276    return output_path

Package a bioimageio resource as a zip file.

Arguments:
  • source: bioimageio resource description
  • compression: The numeric constant of compression method.
  • compression_level: Compression level to use when writing files to the archive. See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile
  • output_path: file path to write package to
  • weights_priority_order: If given, only the first weights format present in the model is included. If none of the prioritized weights formats is found, all are included.
Returns:

path to zipped bioimageio package
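
A hedged usage sketch; the source URL and output path are hypothetical:

    from pathlib import Path

    from bioimageio.spec import save_bioimageio_package

    zip_path = save_bioimageio_package(
        "https://example.com/my_model/rdf.yaml",   # hypothetical source
        output_path=Path("my_model.bioimageio.zip"),
        weights_priority_order=["pytorch_state_dict", "onnx"],  # optional, models only
    )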

def save_bioimageio_yaml_only( rd: Union[Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')], Dict[str, YamlValue], InvalidDescr], /, file: Union[Annotated[pathlib.Path, PathType(path_type='new')], Annotated[pathlib.Path, PathType(path_type='file')], TextIO], *, exclude_unset: bool = True, exclude_defaults: bool = False):
213def save_bioimageio_yaml_only(
214    rd: Union[ResourceDescr, BioimageioYamlContent, InvalidDescr],
215    /,
216    file: Union[NewPath, FilePath, TextIO],
217    *,
218    exclude_unset: bool = True,
219    exclude_defaults: bool = False,
220):
221    """write the metadata of a resource description (`rd`) to `file`
222    without writing any of the referenced files in it.
223
224    Args:
225        rd: bioimageio resource description
226        file: file or stream to save to
227        exclude_unset: Exclude fields that have not explicitly been set.
228        exclude_defaults: Exclude fields that have the default value (even if set explicitly).
229
230    Note: To save a resource description with its associated files as a package,
231    use `save_bioimageio_package` or `save_bioimageio_package_as_folder`.
232    """
233    if isinstance(rd, ResourceDescrBase):
234        content = dump_description(
235            rd, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults
236        )
237    else:
238        content = rd
239
240    write_yaml(cast(YamlValue, content), file)

Write the metadata of a resource description (rd) to file without writing any of the files it references.

Arguments:
  • rd: bioimageio resource description
  • file: file or stream to save to
  • exclude_unset: Exclude fields that have not explicitly been set.
  • exclude_defaults: Exclude fields that have the default value (even if set explicitly).

Note: To save a resource description with its associated files as a package, use save_bioimageio_package or save_bioimageio_package_as_folder.
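
A minimal usage sketch (the source path is a placeholder):

    from pathlib import Path

    from bioimageio.spec import load_description, save_bioimageio_yaml_only

    descr = load_description("path/to/rdf.yaml")  # hypothetical source
    # write only the metadata; referenced files are not copied
    save_bioimageio_yaml_only(descr, file=Path("rdf_only.yaml"))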

settings = Settings(allow_pickle=False, cache_path=PosixPath('/home/runner/.cache/bioimageio'), collection_http_pattern='https://hypha.aicell.io/bioimage-io/artifacts/{bioimageio_id}/files/rdf.yaml', hypha_upload='https://hypha.aicell.io/public/services/artifact-manager/create', hypha_upload_token=None, http_timeout=10.0, id_map='https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/id_map.json', id_map_draft='https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/id_map_draft.json', perform_io_checks=True, resolve_draft=True, log_warnings=True, github_username=None, github_token=None, CI='true', user_agent=None)
SpecificResourceDescr = typing.Annotated[typing.Union[typing.Annotated[typing.Union[typing.Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], typing.Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], typing.Annotated[typing.Union[typing.Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], typing.Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], typing.Annotated[typing.Union[typing.Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], typing.Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], typing.Annotated[typing.Union[typing.Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], typing.Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)]
def update_format( source: Union[Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')], Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, pydantic.networks.HttpUrl, zipfile.ZipFile, Dict[str, YamlValue], InvalidDescr], /, *, output: Union[pathlib.Path, TextIO, NoneType] = None, exclude_defaults: bool = True, perform_io_checks: Optional[bool] = None) -> Union[Annotated[Union[ApplicationDescr, DatasetDescr, ModelDescr, NotebookDescr], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], GenericDescr, InvalidDescr]:
269def update_format(
270    source: Union[
271        ResourceDescr,
272        PermissiveFileSource,
273        ZipFile,
274        BioimageioYamlContent,
275        InvalidDescr,
276    ],
277    /,
278    *,
279    output: Union[Path, TextIO, None] = None,
280    exclude_defaults: bool = True,
281    perform_io_checks: Optional[bool] = None,
282) -> Union[LatestResourceDescr, InvalidDescr]:
283    """Update a resource description.
284
285    Notes:
286    - Invalid **source** descriptions may fail to update.
287    - The updated description might be invalid (even if the **source** was valid).
288    """
289
290    if isinstance(source, ResourceDescrBase):
291        root = source.root
292        source = dump_description(source)
293    else:
294        root = None
295
296    if isinstance(source, collections.abc.Mapping):
297        descr = build_description(
298            source,
299            context=get_validation_context().replace(
300                root=root, perform_io_checks=perform_io_checks
301            ),
302            format_version=LATEST,
303        )
304
305    else:
306        descr = load_description(
307            source,
308            perform_io_checks=perform_io_checks,
309            format_version=LATEST,
310        )
311
312    if output is not None:
313        save_bioimageio_yaml_only(descr, file=output, exclude_defaults=exclude_defaults)
314
315    return descr

Update a resource description.

Notes:

  • Invalid source descriptions may fail to update.
  • The updated description might be invalid (even if the source was valid).
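
A minimal usage sketch (paths are placeholders):

    from pathlib import Path

    from bioimageio.spec import update_format

    # convert a (possibly older) description to the latest format version
    # and write the updated metadata to a new rdf.yaml
    updated = update_format("path/to/rdf.yaml", output=Path("rdf_latest.yaml"))
    updated.validation_summary.display()
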
def update_hashes( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, pydantic.networks.HttpUrl, zipfile.ZipFile, Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')], Dict[str, YamlValue]], /) -> Union[Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, 
title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')], InvalidDescr]:
318def update_hashes(
319    source: Union[PermissiveFileSource, ZipFile, ResourceDescr, BioimageioYamlContent],
320    /,
321) -> Union[ResourceDescr, InvalidDescr]:
322    """Update hash values of the files referenced in **source**."""
323    if isinstance(source, ResourceDescrBase):
324        root = source.root
325        source = dump_description(source)
326    else:
327        root = None
328
329    context = get_validation_context().replace(
330        update_hashes=True, root=root, perform_io_checks=True
331    )
332    with context:
333        if isinstance(source, collections.abc.Mapping):
334            return build_description(source)
335        else:
336            return load_description(source, perform_io_checks=True)

Update hash values of the files referenced in source.
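
A minimal usage sketch (the source path is a placeholder; IO checks are performed to recompute the hashes):

    from pathlib import Path

    from bioimageio.spec import save_bioimageio_yaml_only, update_hashes

    refreshed = update_hashes("path/to/rdf.yaml")  # hypothetical source
    save_bioimageio_yaml_only(refreshed, file=Path("rdf_with_hashes.yaml"))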

def upload( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file'), FieldInfo(annotation=NoneType, required=True, title='FilePath')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, pydantic.networks.HttpUrl, zipfile.ZipFile, Annotated[Union[Annotated[Union[Annotated[bioimageio.spec.application.v0_2.ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.2')], Annotated[ApplicationDescr, FieldInfo(annotation=NoneType, required=True, title='application 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='application')], Annotated[Union[Annotated[bioimageio.spec.dataset.v0_2.DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.2')], Annotated[DatasetDescr, FieldInfo(annotation=NoneType, required=True, title='dataset 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='dataset')], Annotated[Union[Annotated[bioimageio.spec.model.v0_4.ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.4')], Annotated[ModelDescr, FieldInfo(annotation=NoneType, required=True, title='model 0.5')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='model')], Annotated[Union[Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.2')], Annotated[NotebookDescr, FieldInfo(annotation=NoneType, required=True, title='notebook 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='notebook')]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.generic.v0_2.GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.2')], Annotated[GenericDescr, FieldInfo(annotation=NoneType, required=True, title='generic 0.3')]], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None), FieldInfo(annotation=NoneType, required=True, title='generic')], Dict[str, YamlValue]], /) -> bioimageio.spec._internal.url.HttpUrl:
 28def upload(
 29    source: Union[PermissiveFileSource, ZipFile, ResourceDescr, BioimageioYamlContent],
 30    /,
 31) -> HttpUrl:
 32    """Upload a new resource description (version) to the hypha server to be shared at bioimage.io.
 33    To edit an existing resource **version**, please log in to https://bioimage.io and use the web interface.
 34
 35    WARNING: This upload function is in alpha stage and might change in the future.
 36
 37    Args:
 38        source: The resource description to upload.
 39
 40    Returns:
 41        A URL to the uploaded resource description.
 42        Note: It might take some time until the resource is processed and available for download from the returned URL.
 43    """
 44
 45    if settings.hypha_upload_token is None:
 46        raise ValueError(
 47            """
 48Upload token is not set. Please set BIOIMAGEIO_HYPHA_UPLOAD_TOKEN in your environment variables.
 49By setting this token you agree to our terms of service at https://bioimage.io/#/toc.
 50
 51How to obtain a token:
 52    1. Login to https://bioimage.io
 53    2. Generate a new token at https://bioimage.io/#/api?tab=hypha-rpc
 54"""
 55        )
 56
 57    if isinstance(source, ResourceDescrBase):
 58        # If source is already a ResourceDescr, we can use it directly
 59        descr = source
 60    elif isinstance(source, dict):
 61        descr = build_description(source)
 62    else:
 63        descr = load_description(source)
 64
 65    if isinstance(descr, InvalidDescr):
 66        raise ValueError("Uploading invalid resource descriptions is not allowed.")
 67
 68    if descr.type != "model":
 69        raise NotImplementedError(
 70            f"For now, only model resources can be uploaded (got type={descr.type})."
 71        )
 72
 73    if descr.id is not None:
 74        raise ValueError(
 75            "You cannot upload a resource with an id. Please remove the id from the description and make sure to upload a new non-existing resource. To edit an existing resource, please use the web interface at https://bioimage.io."
 76        )
 77
 78    content = get_resource_package_content(descr)
 79
 80    metadata = content[BIOIMAGEIO_YAML]
 81    assert isinstance(metadata, dict)
 82    manifest = dict(metadata)
 83
 84    # only admins can upload a resource with a version
 85    artifact_version = "stage"  # if descr.version is None else str(descr.version)
 86
 87    # Create new model
 88    r = httpx.post(
 89        settings.hypha_upload,
 90        json={
 91            "parent_id": "bioimage-io/bioimage.io",
 92            "alias": (
 93                descr.id or "{animal_adjective}-{animal}"
 94            ),  # TODO: adapt for non-model uploads,
 95            "type": descr.type,
 96            "manifest": manifest,
 97            "version": artifact_version,
 98        },
 99        headers=(
100            headers := {
101                "Authorization": f"Bearer {settings.hypha_upload_token}",
102                "Content-Type": "application/json",
103            }
104        ),
105    )
106
107    response = r.json()
108    artifact_id = response.get("id")
109    if artifact_id is None:
110        try:
111            logger.error("Response detail: {}", "".join(response["detail"]))
112        except Exception:
113            logger.error("Response: {}", response)
114
115        raise RuntimeError(f"Upload did not return resource id: {response}")
116    else:
117        logger.info("Uploaded resource description {}", artifact_id)
118
119    for file_name, file_source in content.items():
120        # Get upload URL for a file
121        response = httpx.post(
122            settings.hypha_upload.replace("/create", "/put_file"),
123            json={
124                "artifact_id": artifact_id,
125                "file_path": file_name,
126            },
127            headers=headers,
128            follow_redirects=True,
129        )
130        upload_url = response.raise_for_status().json()
131
132        # Upload file to the provided URL
133        if isinstance(file_source, collections.abc.Mapping):
134            buf = io.BytesIO()
135            write_yaml(file_source, buf)
136            files = {file_name: buf}
137        else:
138            files = {file_name: get_reader(file_source)}
139
140        response = httpx.put(
141            upload_url,
142            files=files,  # pyright: ignore[reportArgumentType]
143            # TODO: follow up on https://github.com/encode/httpx/discussions/3611
144            headers={"Content-Type": ""},  # Important for S3 uploads
145            follow_redirects=True,
146        )
147        logger.info("Uploaded '{}' successfully", file_name)
148
149    # Update model status
150    manifest["status"] = "request-review"
151    response = httpx.post(
152        settings.hypha_upload.replace("/create", "/edit"),
153        json={
154            "artifact_id": artifact_id,
155            "version": artifact_version,
156            "manifest": manifest,
157        },
158        headers=headers,
159        follow_redirects=True,
160    )
161    logger.info(
162        "Updated status of {}/{} to 'request-review'", artifact_id, artifact_version
163    )
164    logger.warning(
165        "Upload successful. Please note that the uploaded resource might not be available for download immediately."
166    )
167    with get_validation_context().replace(perform_io_checks=False):
168        return HttpUrl(
169            f"https://hypha.aicell.io/bioimage-io/artifacts/{artifact_id}/files/rdf.yaml?version={artifact_version}"
170        )

Upload a new resource description (version) to the hypha server to be shared at bioimage.io. To edit an existing resource version, please log in to https://bioimage.io and use the web interface.

WARNING: This upload function is in alpha stage and might change in the future.

Arguments:
  • source: The resource description to upload.
Returns:

A URL to the uploaded resource description. Note: It might take some time until the resource is processed and available for download from the returned URL.
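
A minimal usage sketch (the source path is a placeholder; the upload token must be available as the BIOIMAGEIO_HYPHA_UPLOAD_TOKEN environment variable before Python starts):

    from bioimageio.spec import upload

    # requires e.g. `export BIOIMAGEIO_HYPHA_UPLOAD_TOKEN=<token from https://bioimage.io/#/api?tab=hypha-rpc>`
    url = upload("path/to/rdf.yaml")  # hypothetical model description
    print(url)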

def validate_format( data: Dict[str, YamlValue], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', context: Optional[ValidationContext] = None) -> ValidationSummary:
212def validate_format(
213    data: BioimageioYamlContent,
214    /,
215    *,
216    format_version: Union[Literal["discover", "latest"], str] = DISCOVER,
217    context: Optional[ValidationContext] = None,
218) -> ValidationSummary:
219    """Validate a dictionary holding a bioimageio description.
220    See `bioimageio.spec.load_description_and_validate_format_only`
221    to validate a file source.
222
223    Args:
224        data: Dictionary holding the raw bioimageio.yaml content.
225        format_version:
226            Format version to (update to and) use for validation.
227            Note:
228            - Use "latest" to convert to the latest available format version.
229            - Use "discover" to use the format version specified in the RDF.
230            - Only considers major.minor format version, ignores patch version.
231            - Conversion to lower format versions is not supported.
232        context: Validation context, see `bioimageio.spec.ValidationContext`
233
234    Note:
235        Use `bioimageio.spec.load_description_and_validate_format_only` to validate a
236        file source instead of loading the YAML content and creating the appropriate
237        `ValidationContext`.
238
239        Alternatively you can use `bioimageio.spec.load_description` and access the
240        `validation_summary` attribute of the returned object.
241    """
242    with context or get_validation_context():
243        rd = build_description(data, format_version=format_version)
244
245    assert rd.validation_summary is not None
246    return rd.validation_summary

Validate a dictionary holding a bioimageio description. See bioimageio.spec.load_description_and_validate_format_only to validate a file source.

Arguments:
  • data: Dictionary holding the raw bioimageio.yaml content.
  • format_version: Format version to (update to and) use for validation. Note:
    • Use "latest" to convert to the latest available format version.
    • Use "discover" to use the format version specified in the RDF.
    • Only considers major.minor format version, ignores patch version.
    • Conversion to lower format versions is not supported.
  • context: Validation context, see bioimageio.spec.ValidationContext
Note:

Use bioimageio.spec.load_description_and_validate_format_only to validate a file source instead of loading the YAML content and creating the appropriate ValidationContext.

Alternatively, you can use bioimageio.spec.load_description and access the validation_summary attribute of the returned object.
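
A minimal usage sketch (assuming PyYAML is available to read the raw YAML content; the file path is a placeholder):

    import yaml  # PyYAML, only used here to obtain the raw content as a dictionary

    from bioimageio.spec import validate_format

    with open("path/to/rdf.yaml") as f:
        data = yaml.safe_load(f)

    summary = validate_format(data, format_version="latest")
    print(summary.status)
    summary.display()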

@dataclass(frozen=True)
class ValidationContext(bioimageio.spec._internal.validation_context.ValidationContextBase):
 60@dataclass(frozen=True)
 61class ValidationContext(ValidationContextBase):
 62    """A validation context used to control validation of bioimageio resources.
 63
 64    For example a relative file path in a bioimageio description requires the **root**
 65    context to evaluate if the file is available and, if **perform_io_checks** is true,
 66    if it matches its expected SHA256 hash value.
 67    """
 68
 69    _context_tokens: "List[Token[Optional[ValidationContext]]]" = field(
 70        init=False,
 71        default_factory=cast(
 72            "Callable[[], List[Token[Optional[ValidationContext]]]]", list
 73        ),
 74    )
 75
 76    cache: Union[
 77        DiskCache[RootHttpUrl], MemoryCache[RootHttpUrl], NoopCache[RootHttpUrl]
 78    ] = field(default=settings.disk_cache)
 79    disable_cache: bool = False
 80    """Disable caching downloads to `settings.cache_path`
 81    and (re)download them to memory instead."""
 82
 83    root: Union[RootHttpUrl, DirectoryPath, ZipFile] = Path()
 84    """Url/directory/archive serving as base to resolve any relative file paths."""
 85
 86    warning_level: WarningLevel = 50
 87    """Treat warnings of severity `s` as validation errors if `s >= warning_level`."""
 88
 89    log_warnings: bool = settings.log_warnings
 90    """If `True`, warnings are logged to the terminal.
 91
 92    Note: This setting does not affect warning entries
 93        of a generated `bioimageio.spec.ValidationSummary`.
 94    """
 95
 96    progressbar: Union[None, bool, Callable[[], Progressbar]] = None
 97    """Control any progressbar.
 98    (Currently this is only used for file downloads.)
 99
100    Can be:
101    - `None`: use a default tqdm progressbar (if not settings.CI)
102    - `True`: use a default tqdm progressbar
103    - `False`: disable the progressbar
104    - `callable`: A callable that returns a tqdm-like progressbar.
105    """
106
107    raise_errors: bool = False
108    """Directly raise any validation errors
109    instead of aggregating errors and returning a `bioimageio.spec.InvalidDescr`. (for debugging)"""
110
111    @property
112    def summary(self):
113        if isinstance(self.root, ZipFile):
114            if self.root.filename is None:
115                root = "in-memory"
116            else:
117                root = Path(self.root.filename)
118        else:
119            root = self.root
120
121        return ValidationContextSummary(
122            root=root,
123            file_name=self.file_name,
124            perform_io_checks=self.perform_io_checks,
125            known_files=copy(self.known_files),
126            update_hashes=self.update_hashes,
127        )
128
129    def __enter__(self):
130        self._context_tokens.append(_validation_context_var.set(self))
131        return self
132
133    def __exit__(self, type, value, traceback):  # type: ignore
134        _validation_context_var.reset(self._context_tokens.pop(-1))
135
136    def replace(  # TODO: probably use __replace__ when py>=3.13
137        self,
138        root: Optional[Union[RootHttpUrl, DirectoryPath, ZipFile]] = None,
139        warning_level: Optional[WarningLevel] = None,
140        log_warnings: Optional[bool] = None,
141        file_name: Optional[str] = None,
142        perform_io_checks: Optional[bool] = None,
143        known_files: Optional[Dict[str, Optional[Sha256]]] = None,
144        raise_errors: Optional[bool] = None,
145        update_hashes: Optional[bool] = None,
146        original_source_name: Optional[str] = None,
147    ) -> Self:
148        if known_files is None and root is not None and self.root != root:
149            # reset known files if root changes, but no new known_files are given
150            known_files = {}
151
152        return self.__class__(
153            root=self.root if root is None else root,
154            warning_level=(
155                self.warning_level if warning_level is None else warning_level
156            ),
157            log_warnings=self.log_warnings if log_warnings is None else log_warnings,
158            file_name=self.file_name if file_name is None else file_name,
159            perform_io_checks=(
160                self.perform_io_checks
161                if perform_io_checks is None
162                else perform_io_checks
163            ),
164            known_files=self.known_files if known_files is None else known_files,
165            raise_errors=self.raise_errors if raise_errors is None else raise_errors,
166            update_hashes=(
167                self.update_hashes if update_hashes is None else update_hashes
168            ),
169            original_source_name=(
170                self.original_source_name
171                if original_source_name is None
172                else original_source_name
173            ),
174        )
175
176    @property
177    def source_name(self) -> str:
178        if self.original_source_name is not None:
179            return self.original_source_name
180        elif self.file_name is None:
181            return "in-memory"
182        else:
183            try:
184                if isinstance(self.root, Path):
185                    source = (self.root / self.file_name).absolute()
186                else:
187                    parsed = urlsplit(str(self.root))
188                    path = list(parsed.path.strip("/").split("/")) + [self.file_name]
189                    source = urlunsplit(
190                        (
191                            parsed.scheme,
192                            parsed.netloc,
193                            "/".join(path),
194                            parsed.query,
195                            parsed.fragment,
196                        )
197                    )
198            except ValueError:
199                return self.file_name
200            else:
201                return str(source)

A validation context used to control validation of bioimageio resources.

For example, a relative file path in a bioimageio description requires the root context to determine whether the file is available and, if perform_io_checks is true, whether it matches its expected SHA256 hash value.
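
A minimal usage sketch (the source path is a placeholder):

    from bioimageio.spec import ValidationContext, load_description

    # skip file availability and hash checks while loading a description
    with ValidationContext(perform_io_checks=False):
        descr = load_description("path/to/rdf.yaml")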

ValidationContext( file_name: Optional[str] = None, original_source_name: Optional[str] = None, perform_io_checks: bool = True, known_files: Dict[str, Optional[bioimageio.spec._internal.io_basics.Sha256]] = <factory>, update_hashes: bool = False, cache: Union[genericache.disk_cache.DiskCache[bioimageio.spec._internal.root_url.RootHttpUrl], genericache.memory_cache.MemoryCache[bioimageio.spec._internal.root_url.RootHttpUrl], genericache.noop_cache.NoopCache[bioimageio.spec._internal.root_url.RootHttpUrl]] = <genericache.disk_cache.DiskCache object>, disable_cache: bool = False, root: Union[bioimageio.spec._internal.root_url.RootHttpUrl, Annotated[pathlib.Path, PathType(path_type='dir')], zipfile.ZipFile] = PosixPath('.'), warning_level: Literal[20, 30, 35, 50] = 50, log_warnings: bool = True, progressbar: Union[NoneType, bool, Callable[[], bioimageio.spec._internal.progress.Progressbar]] = None, raise_errors: bool = False)
cache: Union[genericache.disk_cache.DiskCache[bioimageio.spec._internal.root_url.RootHttpUrl], genericache.memory_cache.MemoryCache[bioimageio.spec._internal.root_url.RootHttpUrl], genericache.noop_cache.NoopCache[bioimageio.spec._internal.root_url.RootHttpUrl]] = <genericache.disk_cache.DiskCache object>
disable_cache: bool = False

Disable caching downloads to settings.cache_path and (re)download them to memory instead.

root: Union[bioimageio.spec._internal.root_url.RootHttpUrl, Annotated[pathlib.Path, PathType(path_type='dir')], zipfile.ZipFile] = PosixPath('.')

Url/directory/archive serving as base to resolve any relative file paths.

warning_level: Literal[20, 30, 35, 50] = 50

Treat warnings of severity s as validation errors if s >= warning_level.

log_warnings: bool = True

If True, warnings are logged to the terminal.

Note: This setting does not affect warning entries of a generated bioimageio.spec.ValidationSummary.

progressbar: Union[NoneType, bool, Callable[[], bioimageio.spec._internal.progress.Progressbar]] = None

Control any progressbar. (Currently this is only used for file downloads.)

Can be:

  • None: use a default tqdm progressbar (if not settings.CI)
  • True: use a default tqdm progressbar
  • False: disable the progressbar
  • callable: A callable that returns a tqdm-like progressbar.
raise_errors: bool = False

Directly raise any validation errors instead of aggregating errors and returning a bioimageio.spec.InvalidDescr. (for debugging)

summary
111    @property
112    def summary(self):
113        if isinstance(self.root, ZipFile):
114            if self.root.filename is None:
115                root = "in-memory"
116            else:
117                root = Path(self.root.filename)
118        else:
119            root = self.root
120
121        return ValidationContextSummary(
122            root=root,
123            file_name=self.file_name,
124            perform_io_checks=self.perform_io_checks,
125            known_files=copy(self.known_files),
126            update_hashes=self.update_hashes,
127        )
def replace( self, root: Union[bioimageio.spec._internal.root_url.RootHttpUrl, Annotated[pathlib.Path, PathType(path_type='dir')], zipfile.ZipFile, NoneType] = None, warning_level: Optional[Literal[20, 30, 35, 50]] = None, log_warnings: Optional[bool] = None, file_name: Optional[str] = None, perform_io_checks: Optional[bool] = None, known_files: Optional[Dict[str, Optional[bioimageio.spec._internal.io_basics.Sha256]]] = None, raise_errors: Optional[bool] = None, update_hashes: Optional[bool] = None, original_source_name: Optional[str] = None) -> Self:
136    def replace(  # TODO: probably use __replace__ when py>=3.13
137        self,
138        root: Optional[Union[RootHttpUrl, DirectoryPath, ZipFile]] = None,
139        warning_level: Optional[WarningLevel] = None,
140        log_warnings: Optional[bool] = None,
141        file_name: Optional[str] = None,
142        perform_io_checks: Optional[bool] = None,
143        known_files: Optional[Dict[str, Optional[Sha256]]] = None,
144        raise_errors: Optional[bool] = None,
145        update_hashes: Optional[bool] = None,
146        original_source_name: Optional[str] = None,
147    ) -> Self:
148        if known_files is None and root is not None and self.root != root:
149            # reset known files if root changes, but no new known_files are given
150            known_files = {}
151
152        return self.__class__(
153            root=self.root if root is None else root,
154            warning_level=(
155                self.warning_level if warning_level is None else warning_level
156            ),
157            log_warnings=self.log_warnings if log_warnings is None else log_warnings,
158            file_name=self.file_name if file_name is None else file_name,
159            perform_io_checks=(
160                self.perform_io_checks
161                if perform_io_checks is None
162                else perform_io_checks
163            ),
164            known_files=self.known_files if known_files is None else known_files,
165            raise_errors=self.raise_errors if raise_errors is None else raise_errors,
166            update_hashes=(
167                self.update_hashes if update_hashes is None else update_hashes
168            ),
169            original_source_name=(
170                self.original_source_name
171                if original_source_name is None
172                else original_source_name
173            ),
174        )
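
A minimal sketch of deriving a modified context from the currently active one (the source path is a placeholder; warning level 30 corresponds to the WARNING severity):

    from bioimageio.spec import get_validation_context, load_description

    # treat warnings of severity >= 30 as validation errors
    strict = get_validation_context().replace(warning_level=30)
    with strict:
        descr = load_description("path/to/rdf.yaml")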
source_name: str
176    @property
177    def source_name(self) -> str:
178        if self.original_source_name is not None:
179            return self.original_source_name
180        elif self.file_name is None:
181            return "in-memory"
182        else:
183            try:
184                if isinstance(self.root, Path):
185                    source = (self.root / self.file_name).absolute()
186                else:
187                    parsed = urlsplit(str(self.root))
188                    path = list(parsed.path.strip("/").split("/")) + [self.file_name]
189                    source = urlunsplit(
190                        (
191                            parsed.scheme,
192                            parsed.netloc,
193                            "/".join(path),
194                            parsed.query,
195                            parsed.fragment,
196                        )
197                    )
198            except ValueError:
199                return self.file_name
200            else:
201                return str(source)
class ValidationSummary(pydantic.main.BaseModel):
243class ValidationSummary(BaseModel, extra="allow"):
244    """Summarizes output of all bioimageio validations and tests
245    for one specific `ResourceDescr` instance."""
246
247    name: str
248    """Name of the validation"""
249
250    source_name: str
251    """Source of the validated bioimageio description"""
252
253    id: Optional[str] = None
254    """ID of the resource being validated"""
255
256    type: str
257    """Type of the resource being validated"""
258
259    format_version: str
260    """Format version of the resource being validated"""
261
262    status: Literal["passed", "valid-format", "failed"]
263    """Overall status of the bioimageio validation"""
264
265    metadata_completeness: Annotated[float, annotated_types.Interval(ge=0, le=1)] = 0.0
266    """Estimate of completeness of the metadata in the resource description.
267
268    Note: This completeness estimate may change with subsequent releases
269        and should be considered bioimageio.spec version specific.
270    """
271
272    details: List[ValidationDetail]
273    """List of validation details"""
274    env: Set[InstalledPackage] = Field(
275        default_factory=lambda: {
276            InstalledPackage(
277                name="bioimageio.spec",
278                version=VERSION,
279            )
280        }
281    )
282    """List of selected, relevant package versions"""
283
284    saved_conda_list: Optional[str] = None
285
286    @field_serializer("saved_conda_list")
287    def _save_conda_list(self, value: Optional[str]):
288        return self.conda_list
289
290    @property
291    def conda_list(self):
292        if self.saved_conda_list is None:
293            p = subprocess.run(
294                [CONDA_CMD, "list"],
295                stdout=subprocess.PIPE,
296                stderr=subprocess.STDOUT,
297                shell=False,
298                text=True,
299            )
300            self.saved_conda_list = (
301                p.stdout or f"`conda list` exited with {p.returncode}"
302            )
303
304        return self.saved_conda_list
305
306    @property
307    def status_icon(self):
308        if self.status == "passed":
309            return "✔️"
310        elif self.status == "valid-format":
311            return "🟡"
312        else:
313            return "❌"
314
315    @property
316    def errors(self) -> List[ErrorEntry]:
317        return list(chain.from_iterable(d.errors for d in self.details))
318
319    @property
320    def warnings(self) -> List[WarningEntry]:
321        return list(chain.from_iterable(d.warnings for d in self.details))
322
323    def format(
324        self,
325        *,
326        width: Optional[int] = None,
327        include_conda_list: bool = False,
328    ):
329        """Format summary as Markdown string"""
330        return self._format(
331            width=width, target="md", include_conda_list=include_conda_list
332        )
333
334    format_md = format
335
336    def format_html(
337        self,
338        *,
339        width: Optional[int] = None,
340        include_conda_list: bool = False,
341    ):
342        md_with_html = self._format(
343            target="html", width=width, include_conda_list=include_conda_list
344        )
345        return markdown.markdown(
346            md_with_html, extensions=["tables", "fenced_code", "nl2br"]
347        )
348
349    def display(
350        self,
351        *,
352        width: Optional[int] = None,
353        include_conda_list: bool = False,
354        tab_size: int = 4,
355        soft_wrap: bool = True,
356    ) -> None:
357        try:  # render as HTML in Jupyter notebook
358            from IPython.core.getipython import get_ipython
359            from IPython.display import (
360                display_html,  # pyright: ignore[reportUnknownVariableType]
361            )
362        except ImportError:
363            pass
364        else:
365            if get_ipython() is not None:
366                _ = display_html(
367                    self.format_html(
368                        width=width, include_conda_list=include_conda_list
369                    ),
370                    raw=True,
371                )
372                return
373
374        # render with rich
375        _ = self._format(
376            target=rich.console.Console(
377                width=width,
378                tab_size=tab_size,
379                soft_wrap=soft_wrap,
380            ),
381            width=width,
382            include_conda_list=include_conda_list,
383        )
384
385    def add_detail(self, detail: ValidationDetail, update_status: bool = True):
386        if update_status:
387            if self.status == "valid-format" and detail.status == "passed":
388                # once status is 'valid-format' we can only improve to 'passed'
389                self.status = "passed"
390            elif self.status == "passed" and detail.status == "failed":
391                # once status is 'passed' it can only degrade to 'valid-format'
392                self.status = "valid-format"
393            # once format is 'failed' it cannot improve
394
395        self.details.append(detail)
396
397    def log(
398        self,
399        to: Union[Literal["display"], Path, Sequence[Union[Literal["display"], Path]]],
400    ) -> List[Path]:
401        """Convenience method to display the validation summary in the terminal and/or
402        save it to disk. See `save` for details."""
403        if to == "display":
404            display = True
405            save_to = []
406        elif isinstance(to, Path):
407            display = False
408            save_to = [to]
409        else:
410            display = "display" in to
411            save_to = [p for p in to if p != "display"]
412
413        if display:
414            self.display()
415
416        return self.save(save_to)
417
418    def save(
419        self, path: Union[Path, Sequence[Path]] = Path("{id}_summary_{now}")
420    ) -> List[Path]:
421        """Save the validation/test summary in JSON, Markdown or HTML format.
422
423        Returns:
424            List of file paths the summary was saved to.
425
426        Notes:
427        - Format is chosen based on the suffix: `.json`, `.md`, `.html`.
428        - If **path** has no suffix it is assumed to be a directory to which a
429          `summary.json`, `summary.md` and `summary.html` are saved.
430        """
431        if isinstance(path, (str, Path)):
432            path = [Path(path)]
433
434        # folder to file paths
435        file_paths: List[Path] = []
436        for p in path:
437            if p.suffix:
438                file_paths.append(p)
439            else:
440                file_paths.extend(
441                    [
442                        p / "summary.json",
443                        p / "summary.md",
444                        p / "summary.html",
445                    ]
446                )
447
448        now = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
449        for p in file_paths:
450            p = Path(str(p).format(id=self.id or "bioimageio", now=now))
451            if p.suffix == ".json":
452                self.save_json(p)
453            elif p.suffix == ".md":
454                self.save_markdown(p)
455            elif p.suffix == ".html":
456                self.save_html(p)
457            else:
458                raise ValueError(f"Unknown summary path suffix '{p.suffix}'")
459
460        return file_paths
461
462    def save_json(
463        self, path: Path = Path("summary.json"), *, indent: Optional[int] = 2
464    ):
465        """Save validation/test summary as JSON file."""
466        json_str = self.model_dump_json(indent=indent)
467        path.parent.mkdir(exist_ok=True, parents=True)
468        _ = path.write_text(json_str, encoding="utf-8")
469        logger.info("Saved summary to {}", path.absolute())
470
471    def save_markdown(self, path: Path = Path("summary.md")):
472        """Save rendered validation/test summary as Markdown file."""
473        formatted = self.format_md()
474        path.parent.mkdir(exist_ok=True, parents=True)
475        _ = path.write_text(formatted, encoding="utf-8")
476        logger.info("Saved Markdown formatted summary to {}", path.absolute())
477
478    def save_html(self, path: Path = Path("summary.html")) -> None:
479        """Save rendered validation/test summary as HTML file."""
480        path.parent.mkdir(exist_ok=True, parents=True)
481
482        html = self.format_html()
483        _ = path.write_text(html, encoding="utf-8")
484        logger.info("Saved HTML formatted summary to {}", path.absolute())
485
486    @classmethod
487    def load_json(cls, path: Path) -> Self:
488        """Load validation/test summary from a suitable JSON file"""
489        json_str = Path(path).read_text(encoding="utf-8")
490        return cls.model_validate_json(json_str)
491
492    @field_validator("env", mode="before")
493    def _convert_dict(cls, value: List[Union[List[str], Dict[str, str]]]):
494        """convert old env value for backwards compatibility"""
495        if isinstance(value, list):
496            return [
497                (
498                    (v["name"], v["version"], v.get("build", ""), v.get("channel", ""))
499                    if isinstance(v, dict) and "name" in v and "version" in v
500                    else v
501                )
502                for v in value
503            ]
504        else:
505            return value
506
507    def _format(
508        self,
509        *,
510        target: Union[rich.console.Console, Literal["html", "md"]],
511        width: Optional[int],
512        include_conda_list: bool,
513    ):
514        return _format_summary(
515            self,
516            target=target,
517            width=width or 100,
518            include_conda_list=include_conda_list,
519        )

Summarizes output of all bioimageio validations and tests for one specific ResourceDescr instance.
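
A minimal sketch of obtaining and inspecting such a summary (the source path is a placeholder):

    from bioimageio.spec import load_description

    descr = load_description("path/to/rdf.yaml")
    summary = descr.validation_summary
    print(summary.status_icon, summary.status)
    print(summary.format())  # Markdown report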

name: str = PydanticUndefined

Name of the validation

source_name: str = PydanticUndefined

Source of the validated bioimageio description

id: Optional[str] = None

ID of the resource being validated

type: str = PydanticUndefined

Type of the resource being validated

format_version: str = PydanticUndefined

Format version of the resource being validated

status: Literal['passed', 'valid-format', 'failed'] = PydanticUndefined

Overall status of the bioimageio validation

metadata_completeness: Annotated[float, Interval(gt=None, ge=0, lt=None, le=1)] = 0.0

Estimate of completeness of the metadata in the resource description.

Note: This completeness estimate may change with subsequent releases and should be considered bioimageio.spec version specific.

details: List[bioimageio.spec.summary.ValidationDetail] = PydanticUndefined

List of validation details

env: Set[bioimageio.spec.summary.InstalledPackage] = PydanticUndefined

List of selected, relevant package versions

saved_conda_list: Optional[str] = None
conda_list
290    @property
291    def conda_list(self):
292        if self.saved_conda_list is None:
293            p = subprocess.run(
294                [CONDA_CMD, "list"],
295                stdout=subprocess.PIPE,
296                stderr=subprocess.STDOUT,
297                shell=False,
298                text=True,
299            )
300            self.saved_conda_list = (
301                p.stdout or f"`conda list` exited with {p.returncode}"
302            )
303
304        return self.saved_conda_list
status_icon
306    @property
307    def status_icon(self):
308        if self.status == "passed":
309            return "✔️"
310        elif self.status == "valid-format":
311            return "🟡"
312        else:
313            return "❌"
errors: List[bioimageio.spec.summary.ErrorEntry]
315    @property
316    def errors(self) -> List[ErrorEntry]:
317        return list(chain.from_iterable(d.errors for d in self.details))
warnings: List[bioimageio.spec.summary.WarningEntry]
319    @property
320    def warnings(self) -> List[WarningEntry]:
321        return list(chain.from_iterable(d.warnings for d in self.details))
def format( self, *, width: Optional[int] = None, include_conda_list: bool = False):
323    def format(
324        self,
325        *,
326        width: Optional[int] = None,
327        include_conda_list: bool = False,
328    ):
329        """Format summary as Markdown string"""
330        return self._format(
331            width=width, target="md", include_conda_list=include_conda_list
332        )

Format summary as Markdown string

def format_md( self, *, width: Optional[int] = None, include_conda_list: bool = False):
323    def format(
324        self,
325        *,
326        width: Optional[int] = None,
327        include_conda_list: bool = False,
328    ):
329        """Format summary as Markdown string"""
330        return self._format(
331            width=width, target="md", include_conda_list=include_conda_list
332        )

Format summary as Markdown string

def format_html( self, *, width: Optional[int] = None, include_conda_list: bool = False):
336    def format_html(
337        self,
338        *,
339        width: Optional[int] = None,
340        include_conda_list: bool = False,
341    ):
342        md_with_html = self._format(
343            target="html", width=width, include_conda_list=include_conda_list
344        )
345        return markdown.markdown(
346            md_with_html, extensions=["tables", "fenced_code", "nl2br"]
347        )
def display( self, *, width: Optional[int] = None, include_conda_list: bool = False, tab_size: int = 4, soft_wrap: bool = True) -> None:
349    def display(
350        self,
351        *,
352        width: Optional[int] = None,
353        include_conda_list: bool = False,
354        tab_size: int = 4,
355        soft_wrap: bool = True,
356    ) -> None:
357        try:  # render as HTML in Jupyter notebook
358            from IPython.core.getipython import get_ipython
359            from IPython.display import (
360                display_html,  # pyright: ignore[reportUnknownVariableType]
361            )
362        except ImportError:
363            pass
364        else:
365            if get_ipython() is not None:
366                _ = display_html(
367                    self.format_html(
368                        width=width, include_conda_list=include_conda_list
369                    ),
370                    raw=True,
371                )
372                return
373
374        # render with rich
375        _ = self._format(
376            target=rich.console.Console(
377                width=width,
378                tab_size=tab_size,
379                soft_wrap=soft_wrap,
380            ),
381            width=width,
382            include_conda_list=include_conda_list,
383        )
def add_detail( self, detail: bioimageio.spec.summary.ValidationDetail, update_status: bool = True):
385    def add_detail(self, detail: ValidationDetail, update_status: bool = True):
386        if update_status:
387            if self.status == "valid-format" and detail.status == "passed":
388                # once status is 'valid-format' we can only improve to 'passed'
389                self.status = "passed"
390            elif self.status == "passed" and detail.status == "failed":
391                # once status is 'passed' it can only degrade to 'valid-format'
392                self.status = "valid-format"
393            # once format is 'failed' it cannot improve
394
395        self.details.append(detail)
def log( self, to: Union[Literal['display'], pathlib.Path, Sequence[Union[Literal['display'], pathlib.Path]]]) -> List[pathlib.Path]:
397    def log(
398        self,
399        to: Union[Literal["display"], Path, Sequence[Union[Literal["display"], Path]]],
400    ) -> List[Path]:
401        """Convenience method to display the validation summary in the terminal and/or
402        save it to disk. See `save` for details."""
403        if to == "display":
404            display = True
405            save_to = []
406        elif isinstance(to, Path):
407            display = False
408            save_to = [to]
409        else:
410            display = "display" in to
411            save_to = [p for p in to if p != "display"]
412
413        if display:
414            self.display()
415
416        return self.save(save_to)

Convenience method to display the validation summary in the terminal and/or save it to disk. See save for details.
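
A minimal usage sketch (the paths are placeholders):

    from pathlib import Path

    from bioimageio.spec import load_description

    summary = load_description("path/to/rdf.yaml").validation_summary
    # show the summary in the terminal and additionally save it to the 'reports' directory
    saved_paths = summary.log(["display", Path("reports")])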

def save( self, path: Union[pathlib.Path, Sequence[pathlib.Path]] = PosixPath('{id}_summary_{now}')) -> List[pathlib.Path]:
418    def save(
419        self, path: Union[Path, Sequence[Path]] = Path("{id}_summary_{now}")
420    ) -> List[Path]:
421        """Save the validation/test summary in JSON, Markdown or HTML format.
422
423        Returns:
424            List of file paths the summary was saved to.
425
426        Notes:
427        - Format is chosen based on the suffix: `.json`, `.md`, `.html`.
428        - If **path** has no suffix it is assumed to be a directory to which a
429          `summary.json`, `summary.md` and `summary.html` are saved.
430        """
431        if isinstance(path, (str, Path)):
432            path = [Path(path)]
433
434        # folder to file paths
435        file_paths: List[Path] = []
436        for p in path:
437            if p.suffix:
438                file_paths.append(p)
439            else:
440                file_paths.extend(
441                    [
442                        p / "summary.json",
443                        p / "summary.md",
444                        p / "summary.html",
445                    ]
446                )
447
448        now = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
449        for p in file_paths:
450            p = Path(str(p).format(id=self.id or "bioimageio", now=now))
451            if p.suffix == ".json":
452                self.save_json(p)
453            elif p.suffix == ".md":
454                self.save_markdown(p)
455            elif p.suffix == ".html":
456                self.save_html(p)
457            else:
458                raise ValueError(f"Unknown summary path suffix '{p.suffix}'")
459
460        return file_paths

Save the validation/test summary in JSON, Markdown or HTML format.

Returns:

List of file paths the summary was saved to.

Notes:

  • Format is chosen based on the suffix: .json, .md, .html.
  • If path has no suffix it is assumed to be a directory to which a summary.json, summary.md and summary.html are saved.
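
A minimal usage sketch (the paths are placeholders; {id} and {now} in the path are expanded before saving, as noted above):

    from pathlib import Path

    from bioimageio.spec import load_description

    summary = load_description("path/to/rdf.yaml").validation_summary
    saved = summary.save(Path("reports/{id}_summary_{now}.md"))
    print(saved)
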
def save_json( self, path: pathlib.Path = PosixPath('summary.json'), *, indent: Optional[int] = 2):
462    def save_json(
463        self, path: Path = Path("summary.json"), *, indent: Optional[int] = 2
464    ):
465        """Save validation/test summary as JSON file."""
466        json_str = self.model_dump_json(indent=indent)
467        path.parent.mkdir(exist_ok=True, parents=True)
468        _ = path.write_text(json_str, encoding="utf-8")
469        logger.info("Saved summary to {}", path.absolute())

Save validation/test summary as JSON file.

def save_markdown(self, path: pathlib.Path = PosixPath('summary.md')):
471    def save_markdown(self, path: Path = Path("summary.md")):
472        """Save rendered validation/test summary as Markdown file."""
473        formatted = self.format_md()
474        path.parent.mkdir(exist_ok=True, parents=True)
475        _ = path.write_text(formatted, encoding="utf-8")
476        logger.info("Saved Markdown formatted summary to {}", path.absolute())

Save rendered validation/test summary as Markdown file.

def save_html(self, path: pathlib.Path = PosixPath('summary.html')) -> None:
478    def save_html(self, path: Path = Path("summary.html")) -> None:
479        """Save rendered validation/test summary as HTML file."""
480        path.parent.mkdir(exist_ok=True, parents=True)
481
482        html = self.format_html()
483        _ = path.write_text(html, encoding="utf-8")
484        logger.info("Saved HTML formatted summary to {}", path.absolute())

Save rendered validation/test summary as HTML file.

@classmethod
def load_json(cls, path: pathlib.Path) -> Self:
486    @classmethod
487    def load_json(cls, path: Path) -> Self:
488        """Load validation/test summary from a suitable JSON file"""
489        json_str = Path(path).read_text(encoding="utf-8")
490        return cls.model_validate_json(json_str)

Load validation/test summary from a suitable JSON file
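
A minimal usage sketch (the path is a placeholder):

    from pathlib import Path

    from bioimageio.spec import ValidationSummary

    summary = ValidationSummary.load_json(Path("summary.json"))
    summary.display()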