bioimageio.spec
1""" 2.. include:: ../../README.md 3""" 4 5# ruff: noqa: E402 6from loguru import logger 7 8logger.disable("bioimageio.spec") 9 10from . import ( 11 application, 12 common, 13 conda_env, 14 dataset, 15 generic, 16 model, 17 pretty_validation_errors, 18 summary, 19 utils, 20) 21from ._description import ( 22 LatestResourceDescr, 23 ResourceDescr, 24 SpecificResourceDescr, 25 build_description, 26 dump_description, 27 validate_format, 28) 29from ._get_conda_env import BioimageioCondaEnv, get_conda_env 30from ._internal import settings 31from ._internal.common_nodes import InvalidDescr 32from ._internal.validation_context import ValidationContext, get_validation_context 33from ._io import ( 34 load_dataset_description, 35 load_description, 36 load_description_and_validate_format_only, 37 load_model_description, 38 save_bioimageio_yaml_only, 39 update_format, 40 update_hashes, 41) 42from ._package import ( 43 get_resource_package_content, 44 save_bioimageio_package, 45 save_bioimageio_package_as_folder, 46 save_bioimageio_package_to_stream, 47) 48from ._upload import upload 49from ._version import VERSION as __version__ 50from .application import AnyApplicationDescr, ApplicationDescr 51from .dataset import AnyDatasetDescr, DatasetDescr 52from .generic import AnyGenericDescr, GenericDescr 53from .model import AnyModelDescr, ModelDescr 54from .notebook import AnyNotebookDescr, NotebookDescr 55from .pretty_validation_errors import enable_pretty_validation_errors_in_ipynb 56from .summary import ValidationSummary 57 58__all__ = [ 59 "__version__", 60 "AnyApplicationDescr", 61 "AnyDatasetDescr", 62 "AnyGenericDescr", 63 "AnyModelDescr", 64 "AnyNotebookDescr", 65 "application", 66 "ApplicationDescr", 67 "BioimageioCondaEnv", 68 "build_description", 69 "common", 70 "conda_env", 71 "dataset", 72 "DatasetDescr", 73 "dump_description", 74 "enable_pretty_validation_errors_in_ipynb", 75 "generic", 76 "GenericDescr", 77 "get_conda_env", 78 "get_resource_package_content", 79 "get_validation_context", 80 "InvalidDescr", 81 "LatestResourceDescr", 82 "load_dataset_description", 83 "load_description_and_validate_format_only", 84 "load_description", 85 "load_model_description", 86 "model", 87 "ModelDescr", 88 "NotebookDescr", 89 "pretty_validation_errors", 90 "ResourceDescr", 91 "save_bioimageio_package_as_folder", 92 "save_bioimageio_package_to_stream", 93 "save_bioimageio_package", 94 "save_bioimageio_yaml_only", 95 "settings", 96 "SpecificResourceDescr", 97 "summary", 98 "update_format", 99 "update_hashes", 100 "upload", 101 "utils", 102 "validate_format", 103 "ValidationContext", 104 "ValidationSummary", 105]
```python
class ApplicationDescr(GenericDescrBase):
    """Bioimage.io description of an application."""

    implemented_type: ClassVar[Literal["application"]] = "application"
    if TYPE_CHECKING:
        type: Literal["application"] = "application"
    else:
        type: Literal["application"]

    id: Optional[ApplicationId] = None
    """bioimage.io-wide unique resource identifier
    assigned by bioimage.io; version **un**specific."""

    parent: Optional[ApplicationId] = None
    """The description from which this one is derived"""

    source: Annotated[
        FAIR[Optional[FileSource_]],
        Field(description="URL or path to the source of the application"),
    ] = None
    """The primary source of the application"""
```
Bioimage.io description of an application.
bioimage.io-wide unique resource identifier assigned by bioimage.io; version unspecific.
The description from which this one is derived
The primary source of the application
```python
class BioimageioCondaEnv(CondaEnv):
    """A special `CondaEnv` that
    - automatically adds bioimageio specific dependencies
    - sorts dependencies
    """

    @model_validator(mode="after")
    def _normalize_bioimageio_conda_env(self):
        """update a conda env such that we have bioimageio.core and sorted dependencies"""
        for req_channel in ("conda-forge", "nodefaults"):
            if req_channel not in self.channels:
                self.channels.append(req_channel)

        if "defaults" in self.channels:
            warnings.warn("removing 'defaults' from conda-channels")
            self.channels.remove("defaults")

        if "pip" not in self.dependencies:
            self.dependencies.append("pip")

        for dep in self.dependencies:
            if isinstance(dep, PipDeps):
                pip_section = dep
                pip_section.pip.sort()
                break
        else:
            pip_section = None

        if (
            pip_section is None
            or not any(pd.startswith("bioimageio.core") for pd in pip_section.pip)
        ) and not any(
            d.startswith("bioimageio.core")
            or d.startswith("conda-forge::bioimageio.core")
            for d in self.dependencies
            if not isinstance(d, PipDeps)
        ):
            self.dependencies.append("conda-forge::bioimageio.core")

        self.dependencies.sort()
        return self
```
A special `CondaEnv` that
- automatically adds bioimageio specific dependencies
- sorts dependencies
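A short usage sketch; it assumes `CondaEnv` exposes `channels` and `dependencies` as the validator above implies:

```python
from bioimageio.spec import BioimageioCondaEnv

# The validator normalizes the env: 'defaults' is dropped, 'conda-forge' and
# 'nodefaults' are added, 'pip' and 'conda-forge::bioimageio.core' are ensured,
# and dependencies end up sorted.
env = BioimageioCondaEnv(channels=["defaults"], dependencies=["numpy"])
print(env.channels, env.dependencies)
```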
```python
def build_description(
    content: BioimageioYamlContentView,
    /,
    *,
    context: Optional[ValidationContext] = None,
    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
) -> Union[ResourceDescr, InvalidDescr]:
    """build a bioimage.io resource description from an RDF's content.

    Use `load_description` if you want to build a resource description from an rdf.yaml
    or bioimage.io zip-package.

    Args:
        content: loaded rdf.yaml file (loaded with YAML, not bioimageio.spec)
        context: validation context to use during validation
        format_version:
            (optional) use this argument to load the resource and
            convert its metadata to a higher format_version.
            Note:
                - Use "latest" to convert to the latest available format version.
                - Use "discover" to use the format version specified in the RDF.
                - Only considers major.minor format version, ignores patch version.
                - Conversion to lower format versions is not supported.

    Returns:
        An object holding all metadata of the bioimage.io resource

    """

    return build_description_impl(
        content,
        context=context,
        format_version=format_version,
    )
```
build a bioimage.io resource description from an RDF's content.
Use `load_description` if you want to build a resource description from an rdf.yaml or bioimage.io zip-package.
Arguments:
- content: loaded rdf.yaml file (loaded with YAML, not bioimageio.spec)
- context: validation context to use during validation
- format_version: (optional) use this argument to load the resource and
convert its metadata to a higher format_version.
Note:
- Use "latest" to convert to the latest available format version.
- Use "discover" to use the format version specified in the RDF.
- Only considers major.minor format version, ignores patch version.
- Conversion to lower format versions is not supported.
Returns:
An object holding all metadata of the bioimage.io resource
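For illustration, a hedged sketch that builds a description from pre-parsed YAML content (the local path and the choice of YAML library are assumptions):

```python
import yaml  # any YAML parser yielding plain dicts works here

from bioimageio.spec import InvalidDescr, build_description

with open("rdf.yaml") as f:
    content = yaml.safe_load(f)

descr = build_description(content, format_version="latest")
if isinstance(descr, InvalidDescr):
    print(descr.validation_summary)  # inspect what went wrong
```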
```python
class DatasetDescr(GenericDescrBase):
    """A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage
    processing.
    """

    implemented_type: ClassVar[Literal["dataset"]] = "dataset"
    if TYPE_CHECKING:
        type: Literal["dataset"] = "dataset"
    else:
        type: Literal["dataset"]

    id: Optional[DatasetId] = None
    """bioimage.io-wide unique resource identifier
    assigned by bioimage.io; version **un**specific."""

    parent: Optional[DatasetId] = None
    """The description from which this one is derived"""

    source: FAIR[Optional[HttpUrl]] = None
    """URL to the source of the dataset."""

    @model_validator(mode="before")
    @classmethod
    def _convert(cls, data: Dict[str, Any], /) -> Dict[str, Any]:
        if (
            data.get("type") == "dataset"
            and isinstance(fv := data.get("format_version"), str)
            and fv.startswith("0.2.")
        ):
            old = DatasetDescr02.load(data)
            if isinstance(old, InvalidDescr):
                return data

            return cast(
                Dict[str, Any],
                (cls if TYPE_CHECKING else dict)(
                    attachments=(
                        []
                        if old.attachments is None
                        else [FileDescr(source=f) for f in old.attachments.files]
                    ),
                    authors=[_author_conv.convert_as_dict(a) for a in old.authors],  # pyright: ignore[reportArgumentType]
                    badges=old.badges,
                    cite=[
                        {"text": c.text, "doi": c.doi, "url": c.url} for c in old.cite
                    ],  # pyright: ignore[reportArgumentType]
                    config=old.config,  # pyright: ignore[reportArgumentType]
                    covers=old.covers,
                    description=old.description,
                    documentation=old.documentation,
                    format_version="0.3.0",
                    git_repo=old.git_repo,  # pyright: ignore[reportArgumentType]
                    icon=old.icon,
                    id=None if old.id is None else DatasetId(old.id),
                    license=old.license,  # type: ignore
                    links=old.links,
                    maintainers=[
                        _maintainer_conv.convert_as_dict(m) for m in old.maintainers
                    ],  # pyright: ignore[reportArgumentType]
                    name=old.name,
                    source=old.source,
                    tags=old.tags,
                    type=old.type,
                    uploader=old.uploader,
                    version=old.version,
                    **(old.model_extra or {}),
                ),
            )

        return data
```
A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage processing.
bioimage.io-wide unique resource identifier assigned by bioimage.io; version unspecific.
The description from which this one is derived
"URL to the source of the dataset.
```python
def dump_description(
    rd: Union[ResourceDescr, InvalidDescr],
    /,
    *,
    exclude_unset: bool = True,
    exclude_defaults: bool = False,
) -> BioimageioYamlContent:
    """Converts a resource description to a dictionary containing only simple types
    that can directly be serialized to YAML.

    Args:
        rd: bioimageio resource description
        exclude_unset: Exclude fields that have not explicitly been set.
        exclude_defaults: Exclude fields that have the default value (even if set explicitly).
    """
    return rd.model_dump(
        mode="json", exclude_unset=exclude_unset, exclude_defaults=exclude_defaults
    )
```
Converts a resource description to a dictionary containing only simple types that can directly be serialized to YAML.
Arguments:
- rd: bioimageio resource description
- exclude_unset: Exclude fields that have not explicitly been set.
- exclude_defaults: Exclude fields that have the default value (even if set explicitly).
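A small round-trip sketch (the local path is hypothetical):

```python
from bioimageio.spec import dump_description, load_description

rd = load_description("rdf.yaml")
content = dump_description(rd)  # only simple, YAML-serializable values
assert isinstance(content, dict)
```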
```python
def enable_pretty_validation_errors_in_ipynb():
    """DEPRECATED; this is enabled by default at import time."""
    warnings.warn(
        "deprecated, this is enabled by default at import time.",
        DeprecationWarning,
        stacklevel=2,
    )
```
DEPRECATED; this is enabled by default at import time.
```python
class GenericDescr(GenericDescrBase, extra="ignore"):
    """Specification of the fields used in a generic bioimage.io-compliant resource description file (RDF).

    An RDF is a YAML file that describes a resource such as a model, a dataset, or a notebook.
    Note that those resources are described with a type-specific RDF.
    Use this generic resource description if none of the known specific types matches your resource.
    """

    implemented_type: ClassVar[Literal["generic"]] = "generic"
    if TYPE_CHECKING:
        type: Annotated[str, LowerCase] = "generic"
        """The resource type assigns a broad category to the resource."""
    else:
        type: Annotated[str, LowerCase]
        """The resource type assigns a broad category to the resource."""

    id: Optional[
        Annotated[ResourceId, Field(examples=["affable-shark", "ambitious-sloth"])]
    ] = None
    """bioimage.io-wide unique resource identifier
    assigned by bioimage.io; version **un**specific."""

    parent: Optional[ResourceId] = None
    """The description from which this one is derived"""

    source: Optional[HttpUrl] = None
    """The primary source of the resource"""

    @field_validator("type", mode="after")
    @classmethod
    def check_specific_types(cls, value: str) -> str:
        if value in KNOWN_SPECIFIC_RESOURCE_TYPES:
            raise ValueError(
                f"Use the {value} description instead of this generic description for"
                + f" your '{value}' resource."
            )

        return value
```
Specification of the fields used in a generic bioimage.io-compliant resource description file (RDF).
An RDF is a YAML file that describes a resource such as a model, a dataset, or a notebook. Note that those resources are described with a type-specific RDF. Use this generic resource description if none of the known specific types matches your resource.
bioimage.io-wide unique resource identifier assigned by bioimage.io; version unspecific.
The description from which this one is derived
```python
def get_conda_env(
    *,
    entry: SupportedWeightsEntry,
    env_name: Optional[Union[Literal["DROP"], str]] = None,
) -> BioimageioCondaEnv:
    """get the recommended Conda environment for a given weights entry description"""
    if isinstance(entry, (v0_4.OnnxWeightsDescr, v0_5.OnnxWeightsDescr)):
        conda_env = _get_default_onnx_env(opset_version=entry.opset_version)
    elif isinstance(
        entry,
        (
            v0_4.PytorchStateDictWeightsDescr,
            v0_5.PytorchStateDictWeightsDescr,
            v0_4.TorchscriptWeightsDescr,
            v0_5.TorchscriptWeightsDescr,
        ),
    ):
        if (
            isinstance(entry, v0_5.TorchscriptWeightsDescr)
            or entry.dependencies is None
        ):
            conda_env = _get_default_pytorch_env(pytorch_version=entry.pytorch_version)
        else:
            conda_env = _get_env_from_deps(entry.dependencies)

    elif isinstance(
        entry,
        (
            v0_4.TensorflowSavedModelBundleWeightsDescr,
            v0_5.TensorflowSavedModelBundleWeightsDescr,
        ),
    ):
        if entry.dependencies is None:
            conda_env = _get_default_tf_env(tensorflow_version=entry.tensorflow_version)
        else:
            conda_env = _get_env_from_deps(entry.dependencies)
    elif isinstance(
        entry,
        (v0_4.KerasHdf5WeightsDescr, v0_5.KerasHdf5WeightsDescr),
    ):
        conda_env = _get_default_tf_env(tensorflow_version=entry.tensorflow_version)
    else:
        assert_never(entry)

    if env_name == "DROP":
        conda_env.name = None
    elif env_name is not None:
        conda_env.name = env_name

    return conda_env
```
get the recommended Conda environment for a given weights entry description
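A hedged sketch; it assumes the loaded model carries a `pytorch_state_dict` weights entry (field names follow the model spec's `WeightsDescr`):

```python
from bioimageio.spec import get_conda_env, load_model_description

model = load_model_description("rdf.yaml")  # hypothetical path
entry = model.weights.pytorch_state_dict    # assumption: this entry is present
if entry is not None:
    env = get_conda_env(entry=entry, env_name="my-bioimageio-env")
    print(env.name)
```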
```python
def get_resource_package_content(
    rd: ResourceDescr,
    /,
    *,
    bioimageio_yaml_file_name: FileName = BIOIMAGEIO_YAML,
    weights_priority_order: Optional[Sequence[WeightsFormat]] = None,  # model only
) -> Dict[FileName, Union[HttpUrl, AbsoluteFilePath, BioimageioYamlContent, ZipPath]]:
    ret: Dict[
        FileName, Union[HttpUrl, AbsoluteFilePath, BioimageioYamlContent, ZipPath]
    ] = {}
    for k, v in get_package_content(
        rd,
        bioimageio_yaml_file_name=bioimageio_yaml_file_name,
        weights_priority_order=weights_priority_order,
    ).items():
        if isinstance(v, FileDescr):
            if isinstance(v.source, (Path, RelativeFilePath)):
                ret[k] = v.source.absolute()
            else:
                ret[k] = v.source

        else:
            ret[k] = v

    return ret
```
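A small sketch listing what would end up in a package, assuming `rd` is a previously loaded `ResourceDescr`:

```python
from bioimageio.spec import get_resource_package_content

content = get_resource_package_content(rd)
for file_name, src in content.items():
    # src is a URL, an absolute path, a path inside a zip,
    # or the rdf.yaml content itself
    print(file_name, type(src).__name__)
```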
```python
def get_validation_context(
    default: Optional[ValidationContext] = None,
) -> ValidationContext:
    """Get the currently active validation context (or a default)"""
    return _validation_context_var.get() or default or ValidationContext()
```
Get the currently active validation context (or a default)
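A sketch of scoping a custom context; it assumes `ValidationContext` can be used as a context manager to set the underlying context variable:

```python
from bioimageio.spec import ValidationContext, get_validation_context, load_description

with ValidationContext(perform_io_checks=False):
    # within this block the active context skips file IO checks
    assert not get_validation_context().perform_io_checks
    rd = load_description("rdf.yaml")  # hypothetical path
```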
```python
class InvalidDescr(
    ResourceDescrBase,
    extra="allow",
    title="An invalid resource description",
):
    """A representation of an invalid resource description"""

    implemented_type: ClassVar[Literal["unknown"]] = "unknown"
    if TYPE_CHECKING:  # see NodeWithExplicitlySetFields
        type: Any = "unknown"
    else:
        type: Any

    implemented_format_version: ClassVar[Literal["unknown"]] = "unknown"
    if TYPE_CHECKING:  # see NodeWithExplicitlySetFields
        format_version: Any = "unknown"
    else:
        format_version: Any
```
A representation of an invalid resource description
```python
def load_dataset_description(
    source: Union[PermissiveFileSource, ZipFile],
    /,
    *,
    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
    perform_io_checks: Optional[bool] = None,
    known_files: Optional[Dict[str, Optional[Sha256]]] = None,
    sha256: Optional[Sha256] = None,
) -> AnyDatasetDescr:
    """same as `load_description`, but additionally ensures that the loaded
    description is valid and of type 'dataset'.
    """
    rd = load_description(
        source,
        format_version=format_version,
        perform_io_checks=perform_io_checks,
        known_files=known_files,
        sha256=sha256,
    )
    return ensure_description_is_dataset(rd)
```
Same as `load_description`, but additionally ensures that the loaded description is valid and of type 'dataset'.
```python
def load_description_and_validate_format_only(
    source: Union[PermissiveFileSource, ZipFile],
    /,
    *,
    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
    perform_io_checks: Optional[bool] = None,
    known_files: Optional[Dict[str, Optional[Sha256]]] = None,
    sha256: Optional[Sha256] = None,
) -> ValidationSummary:
    """same as `load_description`, but only returns the validation summary.

    Returns:
        Validation summary of the bioimage.io resource found at `source`.

    """
    rd = load_description(
        source,
        format_version=format_version,
        perform_io_checks=perform_io_checks,
        known_files=known_files,
        sha256=sha256,
    )
    assert rd.validation_summary is not None
    return rd.validation_summary
```
Same as `load_description`, but only returns the validation summary.
Returns:
Validation summary of the bioimage.io resource found at `source`.
```python
def load_description(
    source: Union[PermissiveFileSource, ZipFile],
    /,
    *,
    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
    perform_io_checks: Optional[bool] = None,
    known_files: Optional[Dict[str, Optional[Sha256]]] = None,
    sha256: Optional[Sha256] = None,
) -> Union[ResourceDescr, InvalidDescr]:
    """load a bioimage.io resource description

    Args:
        source:
            Path or URL to an rdf.yaml or a bioimage.io package
            (zip-file with rdf.yaml in it).
        format_version:
            (optional) Use this argument to load the resource and
            convert its metadata to a higher format_version.
            Note:
                - Use "latest" to convert to the latest available format version.
                - Use "discover" to use the format version specified in the RDF.
                - Only considers major.minor format version, ignores patch version.
                - Conversion to lower format versions is not supported.
        perform_io_checks:
            Whether or not to perform validation that requires file IO,
            e.g. downloading remote files. The existence of local
            absolute file paths is still being checked.
        known_files:
            Allows to bypass download and hashing of referenced files
            (even if perform_io_checks is True).
            Checked files will be added to this dictionary
            with their SHA-256 value.
        sha256:
            Optional SHA-256 value of **source**

    Returns:
        An object holding all metadata of the bioimage.io resource

    """
    if isinstance(source, ResourceDescrBase):
        name = getattr(source, "name", f"{str(source)[:10]}...")
        logger.warning("returning already loaded description '{}' as is", name)
        return source  # pyright: ignore[reportReturnType]

    opened = open_bioimageio_yaml(source, sha256=sha256)

    context = get_validation_context().replace(
        root=opened.original_root,
        file_name=opened.original_file_name,
        original_source_name=opened.original_source_name,
        perform_io_checks=perform_io_checks,
        known_files=known_files,
    )

    return build_description(
        opened.content,
        context=context,
        format_version=format_version,
    )
```
load a bioimage.io resource description
Arguments:
- source: Path or URL to an rdf.yaml or a bioimage.io package (zip-file with rdf.yaml in it).
- format_version: (optional) Use this argument to load the resource and
convert its metadata to a higher format_version.
Note:
- Use "latest" to convert to the latest available format version.
- Use "discover" to use the format version specified in the RDF.
- Only considers major.minor format version, ignores patch version.
- Conversion to lower format versions is not supported.
- perform_io_checks: Whether or not to perform validation that requires file IO, e.g. downloading remote files. The existence of local absolute file paths is still being checked.
- known_files: Allows to bypass download and hashing of referenced files (even if perform_io_checks is True). Checked files will be added to this dictionary with their SHA-256 value.
- sha256: Optional SHA-256 value of source
Returns:
An object holding all metadata of the bioimage.io resource
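The canonical entry point in a hedged sketch (the source URL is a hypothetical placeholder):

```python
from bioimageio.spec import InvalidDescr, load_description

rd = load_description("https://example.com/rdf.yaml", format_version="latest")
if isinstance(rd, InvalidDescr):
    print(rd.validation_summary)
else:
    print(f"loaded {rd.type!r} description")
```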
```python
def load_model_description(
    source: Union[PermissiveFileSource, ZipFile],
    /,
    *,
    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
    perform_io_checks: Optional[bool] = None,
    known_files: Optional[Dict[str, Optional[Sha256]]] = None,
    sha256: Optional[Sha256] = None,
) -> AnyModelDescr:
    """same as `load_description`, but additionally ensures that the loaded
    description is valid and of type 'model'.

    Raises:
        ValueError: for invalid or non-model resources
    """
    rd = load_description(
        source,
        format_version=format_version,
        perform_io_checks=perform_io_checks,
        known_files=known_files,
        sha256=sha256,
    )
    return ensure_description_is_model(rd)
```
Same as `load_description`, but additionally ensures that the loaded description is valid and of type 'model'.
Raises:
- ValueError: for invalid or non-model resources
```python
class ModelDescr(GenericModelDescrBase):
    """Specification of the fields used in a bioimage.io-compliant RDF to describe AI models with pretrained weights.
    These fields are typically stored in a YAML file which we call a model resource description file (model RDF).
    """

    implemented_format_version: ClassVar[Literal["0.5.5"]] = "0.5.5"
    if TYPE_CHECKING:
        format_version: Literal["0.5.5"] = "0.5.5"
    else:
        format_version: Literal["0.5.5"]
        """Version of the bioimage.io model description specification used.
        When creating a new model always use the latest micro/patch version described here.
        The `format_version` is important for any consumer software to understand how to parse the fields.
        """

    implemented_type: ClassVar[Literal["model"]] = "model"
    if TYPE_CHECKING:
        type: Literal["model"] = "model"
    else:
        type: Literal["model"]
        """Specialized resource type 'model'"""

    id: Optional[ModelId] = None
    """bioimage.io-wide unique resource identifier
    assigned by bioimage.io; version **un**specific."""

    authors: FAIR[List[Author]] = Field(
        default_factory=cast(Callable[[], List[Author]], list)
    )
    """The authors are the creators of the model RDF and the primary points of contact."""

    documentation: FAIR[Optional[FileSource_documentation]] = None
    """URL or relative path to a markdown file with additional documentation.
    The recommended documentation file name is `README.md`. An `.md` suffix is mandatory.
    The documentation should include a '#[#] Validation' (sub)section
    with details on how to quantitatively validate the model on unseen data."""

    @field_validator("documentation", mode="after")
    @classmethod
    def _validate_documentation(
        cls, value: Optional[FileSource_documentation]
    ) -> Optional[FileSource_documentation]:
        if not get_validation_context().perform_io_checks or value is None:
            return value

        doc_reader = get_reader(value)
        doc_content = doc_reader.read().decode(encoding="utf-8")
        if not re.search("#.*[vV]alidation", doc_content):
            issue_warning(
                "No '# Validation' (sub)section found in {value}.",
                value=value,
                field="documentation",
            )

        return value

    inputs: NotEmpty[Sequence[InputTensorDescr]]
    """Describes the input tensors expected by this model."""

    @field_validator("inputs", mode="after")
    @classmethod
    def _validate_input_axes(
        cls, inputs: Sequence[InputTensorDescr]
    ) -> Sequence[InputTensorDescr]:
        input_size_refs = cls._get_axes_with_independent_size(inputs)

        for i, ipt in enumerate(inputs):
            valid_independent_refs: Dict[
                Tuple[TensorId, AxisId],
                Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
            ] = {
                **{
                    (ipt.id, a.id): (ipt, a, a.size)
                    for a in ipt.axes
                    if not isinstance(a, BatchAxis)
                    and isinstance(a.size, (int, ParameterizedSize))
                },
                **input_size_refs,
            }
            for a, ax in enumerate(ipt.axes):
                cls._validate_axis(
                    "inputs",
                    i=i,
                    tensor_id=ipt.id,
                    a=a,
                    axis=ax,
                    valid_independent_refs=valid_independent_refs,
                )
        return inputs

    @staticmethod
    def _validate_axis(
        field_name: str,
        i: int,
        tensor_id: TensorId,
        a: int,
        axis: AnyAxis,
        valid_independent_refs: Dict[
            Tuple[TensorId, AxisId],
            Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
        ],
    ):
        if isinstance(axis, BatchAxis) or isinstance(
            axis.size, (int, ParameterizedSize, DataDependentSize)
        ):
            return
        elif not isinstance(axis.size, SizeReference):
            assert_never(axis.size)

        # validate axis.size SizeReference
        ref = (axis.size.tensor_id, axis.size.axis_id)
        if ref not in valid_independent_refs:
            raise ValueError(
                "Invalid tensor axis reference at"
                + f" {field_name}[{i}].axes[{a}].size: {axis.size}."
            )
        if ref == (tensor_id, axis.id):
            raise ValueError(
                "Self-referencing not allowed for"
                + f" {field_name}[{i}].axes[{a}].size: {axis.size}"
            )
        if axis.type == "channel":
            if valid_independent_refs[ref][1].type != "channel":
                raise ValueError(
                    "A channel axis' size may only reference another fixed size"
                    + " channel axis."
                )
            if isinstance(axis.channel_names, str) and "{i}" in axis.channel_names:
                ref_size = valid_independent_refs[ref][2]
                assert isinstance(ref_size, int), (
                    "channel axis ref (another channel axis) has to specify fixed"
                    + " size"
                )
                generated_channel_names = [
                    Identifier(axis.channel_names.format(i=i))
                    for i in range(1, ref_size + 1)
                ]
                axis.channel_names = generated_channel_names

        if (ax_unit := getattr(axis, "unit", None)) != (
            ref_unit := getattr(valid_independent_refs[ref][1], "unit", None)
        ):
            raise ValueError(
                "The units of an axis and its reference axis need to match, but"
                + f" '{ax_unit}' != '{ref_unit}'."
            )
        ref_axis = valid_independent_refs[ref][1]
        if isinstance(ref_axis, BatchAxis):
            raise ValueError(
                f"Invalid reference axis '{ref_axis.id}' for {tensor_id}.{axis.id}"
                + " (a batch axis is not allowed as reference)."
            )

        if isinstance(axis, WithHalo):
            min_size = axis.size.get_size(axis, ref_axis, n=0)
            if (min_size - 2 * axis.halo) < 1:
                raise ValueError(
                    f"axis {axis.id} with minimum size {min_size} is too small for halo"
                    + f" {axis.halo}."
                )

            input_halo = axis.halo * axis.scale / ref_axis.scale
            if input_halo != int(input_halo) or input_halo % 2 == 1:
                raise ValueError(
                    f"input_halo {input_halo} (output_halo {axis.halo} *"
                    + f" output_scale {axis.scale} / input_scale {ref_axis.scale})"
                    + f" {tensor_id}.{axis.id}."
                )

    @model_validator(mode="after")
    def _validate_test_tensors(self) -> Self:
        if not get_validation_context().perform_io_checks:
            return self

        test_output_arrays = [
            None if descr.test_tensor is None else load_array(descr.test_tensor)
            for descr in self.outputs
        ]
        test_input_arrays = [
            None if descr.test_tensor is None else load_array(descr.test_tensor)
            for descr in self.inputs
        ]

        tensors = {
            descr.id: (descr, array)
            for descr, array in zip(
                chain(self.inputs, self.outputs), test_input_arrays + test_output_arrays
            )
        }
        validate_tensors(tensors, tensor_origin="test_tensor")

        output_arrays = {
            descr.id: array for descr, array in zip(self.outputs, test_output_arrays)
        }
        for rep_tol in self.config.bioimageio.reproducibility_tolerance:
            if not rep_tol.absolute_tolerance:
                continue

            if rep_tol.output_ids:
                out_arrays = {
                    oid: a
                    for oid, a in output_arrays.items()
                    if oid in rep_tol.output_ids
                }
            else:
                out_arrays = output_arrays

            for out_id, array in out_arrays.items():
                if array is None:
                    continue

                if rep_tol.absolute_tolerance > (max_test_value := array.max()) * 0.01:
                    raise ValueError(
                        "config.bioimageio.reproducibility_tolerance.absolute_tolerance="
                        + f"{rep_tol.absolute_tolerance} > 0.01*{max_test_value}"
                        + f" (1% of the maximum value of the test tensor '{out_id}')"
                    )

        return self

    @model_validator(mode="after")
    def _validate_tensor_references_in_proc_kwargs(self, info: ValidationInfo) -> Self:
        ipt_refs = {t.id for t in self.inputs}
        out_refs = {t.id for t in self.outputs}
        for ipt in self.inputs:
            for p in ipt.preprocessing:
                ref = p.kwargs.get("reference_tensor")
                if ref is None:
                    continue
                if ref not in ipt_refs:
                    raise ValueError(
                        f"`reference_tensor` '{ref}' not found. Valid input tensor"
                        + f" references are: {ipt_refs}."
                    )

        for out in self.outputs:
            for p in out.postprocessing:
                ref = p.kwargs.get("reference_tensor")
                if ref is None:
                    continue

                if ref not in ipt_refs and ref not in out_refs:
                    raise ValueError(
                        f"`reference_tensor` '{ref}' not found. Valid tensor references"
                        + f" are: {ipt_refs | out_refs}."
                    )

        return self

    # TODO: use validate funcs in validate_test_tensors
    # def validate_inputs(self, input_tensors: Mapping[TensorId, NDArray[Any]]) -> Mapping[TensorId, NDArray[Any]]:

    name: Annotated[
        str,
        RestrictCharacters(string.ascii_letters + string.digits + "_+- ()"),
        MinLen(5),
        MaxLen(128),
        warn(MaxLen(64), "Name longer than 64 characters.", INFO),
    ]
    """A human-readable name of this model.
    It should be no longer than 64 characters
    and may only contain letters, numbers, underscores, minus signs, parentheses and spaces.
    We recommend choosing a name that refers to the model's task and image modality.
    """

    outputs: NotEmpty[Sequence[OutputTensorDescr]]
    """Describes the output tensors."""

    @field_validator("outputs", mode="after")
    @classmethod
    def _validate_tensor_ids(
        cls, outputs: Sequence[OutputTensorDescr], info: ValidationInfo
    ) -> Sequence[OutputTensorDescr]:
        tensor_ids = [
            t.id for t in info.data.get("inputs", []) + info.data.get("outputs", [])
        ]
        duplicate_tensor_ids: List[str] = []
        seen: Set[str] = set()
        for t in tensor_ids:
            if t in seen:
                duplicate_tensor_ids.append(t)

            seen.add(t)

        if duplicate_tensor_ids:
            raise ValueError(f"Duplicate tensor ids: {duplicate_tensor_ids}")

        return outputs

    @staticmethod
    def _get_axes_with_parameterized_size(
        io: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
    ):
        return {
            f"{t.id}.{a.id}": (t, a, a.size)
            for t in io
            for a in t.axes
            if not isinstance(a, BatchAxis) and isinstance(a.size, ParameterizedSize)
        }

    @staticmethod
    def _get_axes_with_independent_size(
        io: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
    ):
        return {
            (t.id, a.id): (t, a, a.size)
            for t in io
            for a in t.axes
            if not isinstance(a, BatchAxis)
            and isinstance(a.size, (int, ParameterizedSize))
        }

    @field_validator("outputs", mode="after")
    @classmethod
    def _validate_output_axes(
        cls, outputs: List[OutputTensorDescr], info: ValidationInfo
    ) -> List[OutputTensorDescr]:
        input_size_refs = cls._get_axes_with_independent_size(
            info.data.get("inputs", [])
        )
        output_size_refs = cls._get_axes_with_independent_size(outputs)

        for i, out in enumerate(outputs):
            valid_independent_refs: Dict[
                Tuple[TensorId, AxisId],
                Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
            ] = {
                **{
                    (out.id, a.id): (out, a, a.size)
                    for a in out.axes
                    if not isinstance(a, BatchAxis)
                    and isinstance(a.size, (int, ParameterizedSize))
                },
                **input_size_refs,
                **output_size_refs,
            }
            for a, ax in enumerate(out.axes):
                cls._validate_axis(
                    "outputs",
                    i,
                    out.id,
                    a,
                    ax,
                    valid_independent_refs=valid_independent_refs,
                )

        return outputs

    packaged_by: List[Author] = Field(
        default_factory=cast(Callable[[], List[Author]], list)
    )
    """The persons that have packaged and uploaded this model.
    Only required if those persons differ from the `authors`."""

    parent: Optional[LinkedModel] = None
    """The model from which this model is derived, e.g. by fine-tuning the weights."""

    @model_validator(mode="after")
    def _validate_parent_is_not_self(self) -> Self:
        if self.parent is not None and self.parent.id == self.id:
            raise ValueError("A model description may not reference itself as parent.")

        return self

    run_mode: Annotated[
        Optional[RunMode],
        warn(None, "Run mode '{value}' has limited support across consumer software."),
    ] = None
    """Custom run mode for this model: for more complex prediction procedures like test time
    data augmentation that currently cannot be expressed in the specification.
    No standard run modes are defined yet."""

    timestamp: Datetime = Field(default_factory=Datetime.now)
    """Timestamp in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format
    with a few restrictions listed [here](https://docs.python.org/3/library/datetime.html#datetime.datetime.fromisoformat).
    (In Python a datetime object is valid, too)."""

    training_data: Annotated[
        Union[None, LinkedDataset, DatasetDescr, DatasetDescr02],
        Field(union_mode="left_to_right"),
    ] = None
    """The dataset used to train this model"""

    weights: Annotated[WeightsDescr, WrapSerializer(package_weights)]
    """The weights for this model.
    Weights can be given for different formats, but should otherwise be equivalent.
    The available weight formats determine which consumers can use this model."""

    config: Config = Field(default_factory=Config.model_construct)

    @model_validator(mode="after")
    def _add_default_cover(self) -> Self:
        if not get_validation_context().perform_io_checks or self.covers:
            return self

        try:
            generated_covers = generate_covers(
                [
                    (t, load_array(t.test_tensor))
                    for t in self.inputs
                    if t.test_tensor is not None
                ],
                [
                    (t, load_array(t.test_tensor))
                    for t in self.outputs
                    if t.test_tensor is not None
                ],
            )
        except Exception as e:
            issue_warning(
                "Failed to generate cover image(s): {e}",
                value=self.covers,
                msg_context=dict(e=e),
                field="covers",
            )
        else:
            self.covers.extend(generated_covers)

        return self

    def get_input_test_arrays(self) -> List[NDArray[Any]]:
        return self._get_test_arrays(self.inputs)

    def get_output_test_arrays(self) -> List[NDArray[Any]]:
        return self._get_test_arrays(self.outputs)

    @staticmethod
    def _get_test_arrays(
        io_descr: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
    ):
        ts: List[FileDescr] = []
        for d in io_descr:
            if d.test_tensor is None:
                raise ValueError(
                    f"Failed to get test arrays: description of '{d.id}' is missing a `test_tensor`."
                )
            ts.append(d.test_tensor)

        data = [load_array(t) for t in ts]
        assert all(isinstance(d, np.ndarray) for d in data)
        return data

    @staticmethod
    def get_batch_size(tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> int:
        batch_size = 1
        tensor_with_batchsize: Optional[TensorId] = None
        for tid in tensor_sizes:
            for aid, s in tensor_sizes[tid].items():
                if aid != BATCH_AXIS_ID or s == 1 or s == batch_size:
                    continue

                if batch_size != 1:
                    assert tensor_with_batchsize is not None
                    raise ValueError(
                        f"batch size mismatch for tensors '{tensor_with_batchsize}' ({batch_size}) and '{tid}' ({s})"
                    )

                batch_size = s
                tensor_with_batchsize = tid

        return batch_size

    def get_output_tensor_sizes(
        self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]
    ) -> Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]:
        """Returns the tensor output sizes for given **input_sizes**.
        Only if **input_sizes** has a valid input shape, the tensor output size is exact.
        Otherwise it might be larger than the actual (valid) output"""
        batch_size = self.get_batch_size(input_sizes)
        ns = self.get_ns(input_sizes)

        tensor_sizes = self.get_tensor_sizes(ns, batch_size=batch_size)
        return tensor_sizes.outputs

    def get_ns(self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]):
        """get parameter `n` for each parameterized axis
        such that the valid input size is >= the given input size"""
        ret: Dict[Tuple[TensorId, AxisId], ParameterizedSize_N] = {}
        axes = {t.id: {a.id: a for a in t.axes} for t in self.inputs}
        for tid in input_sizes:
            for aid, s in input_sizes[tid].items():
                size_descr = axes[tid][aid].size
                if isinstance(size_descr, ParameterizedSize):
                    ret[(tid, aid)] = size_descr.get_n(s)
                elif size_descr is None or isinstance(size_descr, (int, SizeReference)):
                    pass
                else:
                    assert_never(size_descr)

        return ret

    def get_tensor_sizes(
        self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
    ) -> _TensorSizes:
        axis_sizes = self.get_axis_sizes(ns, batch_size=batch_size)
        return _TensorSizes(
            {
                t: {
                    aa: axis_sizes.inputs[(tt, aa)]
                    for tt, aa in axis_sizes.inputs
                    if tt == t
                }
                for t in {tt for tt, _ in axis_sizes.inputs}
            },
            {
                t: {
                    aa: axis_sizes.outputs[(tt, aa)]
                    for tt, aa in axis_sizes.outputs
                    if tt == t
                }
                for t in {tt for tt, _ in axis_sizes.outputs}
            },
        )

    def get_axis_sizes(
        self,
        ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N],
        batch_size: Optional[int] = None,
        *,
        max_input_shape: Optional[Mapping[Tuple[TensorId, AxisId], int]] = None,
    ) -> _AxisSizes:
        """Determine input and output block shape for scale factors **ns**
        of parameterized input sizes.

        Args:
            ns: Scale factor `n` for each axis (keyed by (tensor_id, axis_id))
                that is parameterized as `size = min + n * step`.
            batch_size: The desired size of the batch dimension.
                If given, **batch_size** overwrites any batch size present in
                **max_input_shape**. Default 1.
            max_input_shape: Limits the derived block shapes.
                Each axis for which the input size, parameterized by `n`, is larger
                than **max_input_shape** is set to the minimal value `n_min` for which
                this is still true.
                Use this for small input samples or large values of **ns**.
                Or simply whenever you know the full input shape.

        Returns:
            Resolved axis sizes for model inputs and outputs.
        """
        max_input_shape = max_input_shape or {}
        if batch_size is None:
            for (_t_id, a_id), s in max_input_shape.items():
                if a_id == BATCH_AXIS_ID:
                    batch_size = s
                    break
            else:
                batch_size = 1

        all_axes = {
            t.id: {a.id: a for a in t.axes} for t in chain(self.inputs, self.outputs)
        }

        inputs: Dict[Tuple[TensorId, AxisId], int] = {}
        outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]] = {}

        def get_axis_size(a: Union[InputAxis, OutputAxis]):
            if isinstance(a, BatchAxis):
                if (t_descr.id, a.id) in ns:
                    logger.warning(
                        "Ignoring unexpected size increment factor (n) for batch axis"
                        + " of tensor '{}'.",
                        t_descr.id,
                    )
                return batch_size
            elif isinstance(a.size, int):
                if (t_descr.id, a.id) in ns:
                    logger.warning(
                        "Ignoring unexpected size increment factor (n) for fixed size"
                        + " axis '{}' of tensor '{}'.",
                        a.id,
                        t_descr.id,
                    )
                return a.size
            elif isinstance(a.size, ParameterizedSize):
                if (t_descr.id, a.id) not in ns:
                    raise ValueError(
                        "Size increment factor (n) missing for parametrized axis"
                        + f" '{a.id}' of tensor '{t_descr.id}'."
                    )
                n = ns[(t_descr.id, a.id)]
                s_max = max_input_shape.get((t_descr.id, a.id))
                if s_max is not None:
                    n = min(n, a.size.get_n(s_max))

                return a.size.get_size(n)

            elif isinstance(a.size, SizeReference):
                if (t_descr.id, a.id) in ns:
                    logger.warning(
                        "Ignoring unexpected size increment factor (n) for axis '{}'"
                        + " of tensor '{}' with size reference.",
                        a.id,
                        t_descr.id,
                    )
                assert not isinstance(a, BatchAxis)
                ref_axis = all_axes[a.size.tensor_id][a.size.axis_id]
                assert not isinstance(ref_axis, BatchAxis)
                ref_key = (a.size.tensor_id, a.size.axis_id)
                ref_size = inputs.get(ref_key, outputs.get(ref_key))
                assert ref_size is not None, ref_key
                assert not isinstance(ref_size, _DataDepSize), ref_key
                return a.size.get_size(
                    axis=a,
                    ref_axis=ref_axis,
                    ref_size=ref_size,
                )
            elif isinstance(a.size, DataDependentSize):
                if (t_descr.id, a.id) in ns:
                    logger.warning(
                        "Ignoring unexpected increment factor (n) for data dependent"
                        + " size axis '{}' of tensor '{}'.",
                        a.id,
                        t_descr.id,
                    )
                return _DataDepSize(a.size.min, a.size.max)
            else:
                assert_never(a.size)

        # first resolve all but the `SizeReference` input sizes
        for t_descr in self.inputs:
            for a in t_descr.axes:
                if not isinstance(a.size, SizeReference):
                    s = get_axis_size(a)
                    assert not isinstance(s, _DataDepSize)
                    inputs[t_descr.id, a.id] = s

        # resolve all other input axis sizes
        for t_descr in self.inputs:
            for a in t_descr.axes:
                if isinstance(a.size, SizeReference):
                    s = get_axis_size(a)
                    assert not isinstance(s, _DataDepSize)
                    inputs[t_descr.id, a.id] = s

        # resolve all output axis sizes
        for t_descr in self.outputs:
            for a in t_descr.axes:
                assert not isinstance(a.size, ParameterizedSize)
                s = get_axis_size(a)
                outputs[t_descr.id, a.id] = s

        return _AxisSizes(inputs=inputs, outputs=outputs)

    @model_validator(mode="before")
    @classmethod
    def _convert(cls, data: Dict[str, Any]) -> Dict[str, Any]:
        cls.convert_from_old_format_wo_validation(data)
        return data

    @classmethod
    def convert_from_old_format_wo_validation(cls, data: Dict[str, Any]) -> None:
        """Convert metadata following an older format version to this class's format
        without validating the result.
        """
        if (
            data.get("type") == "model"
            and isinstance(fv := data.get("format_version"), str)
            and fv.count(".") == 2
        ):
            fv_parts = fv.split(".")
            if any(not p.isdigit() for p in fv_parts):
                return

            fv_tuple = tuple(map(int, fv_parts))

            assert cls.implemented_format_version_tuple[0:2] == (0, 5)
            if fv_tuple[:2] in ((0, 3), (0, 4)):
                m04 = _ModelDescr_v0_4.load(data)
                if isinstance(m04, InvalidDescr):
                    try:
                        updated = _model_conv.convert_as_dict(
                            m04  # pyright: ignore[reportArgumentType]
                        )
                    except Exception as e:
                        logger.error(
                            "Failed to convert from invalid model 0.4 description."
                            + f"\nerror: {e}"
                            + "\nProceeding with model 0.5 validation without conversion."
                        )
                        updated = None
                else:
                    updated = _model_conv.convert_as_dict(m04)

                if updated is not None:
                    data.clear()
                    data.update(updated)

            elif fv_tuple[:2] == (0, 5):
                # bump patch version
                data["format_version"] = cls.implemented_format_version
```
Specification of the fields used in a bioimage.io-compliant RDF to describe AI models with pretrained weights. These fields are typically stored in a YAML file which we call a model resource description file (model RDF).
bioimage.io-wide unique resource identifier assigned by bioimage.io; version unspecific.
URL or relative path to a markdown file with additional documentation. The recommended documentation file name is `README.md`. An `.md` suffix is mandatory. The documentation should include a '#[#] Validation' (sub)section with details on how to quantitatively validate the model on unseen data.
Describes the input tensors expected by this model.
A human-readable name of this model. It should be no longer than 64 characters and may only contain letters, numbers, underscores, minus signs, parentheses and spaces. We recommend choosing a name that refers to the model's task and image modality.
Describes the output tensors.
The persons that have packaged and uploaded this model. Only required if those persons differ from the `authors`.
The model from which this model is derived, e.g. by fine-tuning the weights.
Custom run mode for this model: for more complex prediction procedures like test time data augmentation that currently cannot be expressed in the specification. No standard run modes are defined yet.
The dataset used to train this model
The weights for this model. Weights can be given for different formats, but should otherwise be equivalent. The available weight formats determine which consumers can use this model.
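To make the field layout concrete, a hedged inspection sketch (assuming `model` was obtained via `load_model_description`):

```python
for ipt in model.inputs:
    print(ipt.id, [a.id for a in ipt.axes])  # tensor ids and their axis ids
for out in model.outputs:
    print(out.id, [a.id for a in out.axes])
# available weight formats (model_dump is pydantic's standard serializer)
print(model.weights.model_dump(exclude_none=True).keys())
```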
`get_batch_size(tensor_sizes)`: Determine the single batch size shared by all **tensor_sizes**; raises a `ValueError` if two tensors specify conflicting batch sizes.
`get_output_tensor_sizes(input_sizes)`: Returns the tensor output sizes for given **input_sizes**. Only if **input_sizes** has a valid input shape is the tensor output size exact; otherwise it might be larger than the actual (valid) output.
`get_ns(input_sizes)`: Get parameter `n` for each parameterized axis such that the valid input size is >= the given input size.
`get_tensor_sizes(ns, batch_size)`: Resolve axis sizes via `get_axis_sizes` and group them by tensor id for inputs and outputs.
3124 def get_axis_sizes( 3125 self, 3126 ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], 3127 batch_size: Optional[int] = None, 3128 *, 3129 max_input_shape: Optional[Mapping[Tuple[TensorId, AxisId], int]] = None, 3130 ) -> _AxisSizes: 3131 """Determine input and output block shape for scale factors **ns** 3132 of parameterized input sizes. 3133 3134 Args: 3135 ns: Scale factor `n` for each axis (keyed by (tensor_id, axis_id)) 3136 that is parameterized as `size = min + n * step`. 3137 batch_size: The desired size of the batch dimension. 3138 If given **batch_size** overwrites any batch size present in 3139 **max_input_shape**. Default 1. 3140 max_input_shape: Limits the derived block shapes. 3141 Each axis for which the input size, parameterized by `n`, is larger 3142 than **max_input_shape** is set to the minimal value `n_min` for which 3143 this is still true. 3144 Use this for small input samples or large values of **ns**. 3145 Or simply whenever you know the full input shape. 3146 3147 Returns: 3148 Resolved axis sizes for model inputs and outputs. 3149 """ 3150 max_input_shape = max_input_shape or {} 3151 if batch_size is None: 3152 for (_t_id, a_id), s in max_input_shape.items(): 3153 if a_id == BATCH_AXIS_ID: 3154 batch_size = s 3155 break 3156 else: 3157 batch_size = 1 3158 3159 all_axes = { 3160 t.id: {a.id: a for a in t.axes} for t in chain(self.inputs, self.outputs) 3161 } 3162 3163 inputs: Dict[Tuple[TensorId, AxisId], int] = {} 3164 outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]] = {} 3165 3166 def get_axis_size(a: Union[InputAxis, OutputAxis]): 3167 if isinstance(a, BatchAxis): 3168 if (t_descr.id, a.id) in ns: 3169 logger.warning( 3170 "Ignoring unexpected size increment factor (n) for batch axis" 3171 + " of tensor '{}'.", 3172 t_descr.id, 3173 ) 3174 return batch_size 3175 elif isinstance(a.size, int): 3176 if (t_descr.id, a.id) in ns: 3177 logger.warning( 3178 "Ignoring unexpected size increment factor (n) for fixed size" 3179 + " axis '{}' of tensor '{}'.", 3180 a.id, 3181 t_descr.id, 3182 ) 3183 return a.size 3184 elif isinstance(a.size, ParameterizedSize): 3185 if (t_descr.id, a.id) not in ns: 3186 raise ValueError( 3187 "Size increment factor (n) missing for parametrized axis" 3188 + f" '{a.id}' of tensor '{t_descr.id}'." 
3189 ) 3190 n = ns[(t_descr.id, a.id)] 3191 s_max = max_input_shape.get((t_descr.id, a.id)) 3192 if s_max is not None: 3193 n = min(n, a.size.get_n(s_max)) 3194 3195 return a.size.get_size(n) 3196 3197 elif isinstance(a.size, SizeReference): 3198 if (t_descr.id, a.id) in ns: 3199 logger.warning( 3200 "Ignoring unexpected size increment factor (n) for axis '{}'" 3201 + " of tensor '{}' with size reference.", 3202 a.id, 3203 t_descr.id, 3204 ) 3205 assert not isinstance(a, BatchAxis) 3206 ref_axis = all_axes[a.size.tensor_id][a.size.axis_id] 3207 assert not isinstance(ref_axis, BatchAxis) 3208 ref_key = (a.size.tensor_id, a.size.axis_id) 3209 ref_size = inputs.get(ref_key, outputs.get(ref_key)) 3210 assert ref_size is not None, ref_key 3211 assert not isinstance(ref_size, _DataDepSize), ref_key 3212 return a.size.get_size( 3213 axis=a, 3214 ref_axis=ref_axis, 3215 ref_size=ref_size, 3216 ) 3217 elif isinstance(a.size, DataDependentSize): 3218 if (t_descr.id, a.id) in ns: 3219 logger.warning( 3220 "Ignoring unexpected increment factor (n) for data dependent" 3221 + " size axis '{}' of tensor '{}'.", 3222 a.id, 3223 t_descr.id, 3224 ) 3225 return _DataDepSize(a.size.min, a.size.max) 3226 else: 3227 assert_never(a.size) 3228 3229 # first resolve all , but the `SizeReference` input sizes 3230 for t_descr in self.inputs: 3231 for a in t_descr.axes: 3232 if not isinstance(a.size, SizeReference): 3233 s = get_axis_size(a) 3234 assert not isinstance(s, _DataDepSize) 3235 inputs[t_descr.id, a.id] = s 3236 3237 # resolve all other input axis sizes 3238 for t_descr in self.inputs: 3239 for a in t_descr.axes: 3240 if isinstance(a.size, SizeReference): 3241 s = get_axis_size(a) 3242 assert not isinstance(s, _DataDepSize) 3243 inputs[t_descr.id, a.id] = s 3244 3245 # resolve all output axis sizes 3246 for t_descr in self.outputs: 3247 for a in t_descr.axes: 3248 assert not isinstance(a.size, ParameterizedSize) 3249 s = get_axis_size(a) 3250 outputs[t_descr.id, a.id] = s 3251 3252 return _AxisSizes(inputs=inputs, outputs=outputs)
Determine input and output block shape for scale factors ns of parameterized input sizes.
Arguments:
- ns: Scale factor n for each axis (keyed by (tensor_id, axis_id)) that is parameterized as size = min + n * step.
- batch_size: The desired size of the batch dimension. If given, batch_size overwrites any batch size present in max_input_shape. Default 1.
- max_input_shape: Limits the derived block shapes. Each axis for which the input size, parameterized by n, is larger than max_input_shape is set to the minimal value n_min for which this is still true. Use this for small input samples or large values of ns, or simply whenever you know the full input shape.
Returns:
Resolved axis sizes for model inputs and outputs.
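A minimal sketch continuing the get_ns example above (ids remain hypothetical):

    ns = model.get_ns({TensorId("input"): {AxisId("x"): 512, AxisId("y"): 512}})
    axis_sizes = model.get_axis_sizes(ns, batch_size=1)      # keyed by (tensor_id, axis_id)
    tensor_sizes = model.get_tensor_sizes(ns, batch_size=1)  # nested: tensor -> axis -> size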
3260 @classmethod 3261 def convert_from_old_format_wo_validation(cls, data: Dict[str, Any]) -> None: 3262 """Convert metadata following an older format version to this classes' format 3263 without validating the result. 3264 """ 3265 if ( 3266 data.get("type") == "model" 3267 and isinstance(fv := data.get("format_version"), str) 3268 and fv.count(".") == 2 3269 ): 3270 fv_parts = fv.split(".") 3271 if any(not p.isdigit() for p in fv_parts): 3272 return 3273 3274 fv_tuple = tuple(map(int, fv_parts)) 3275 3276 assert cls.implemented_format_version_tuple[0:2] == (0, 5) 3277 if fv_tuple[:2] in ((0, 3), (0, 4)): 3278 m04 = _ModelDescr_v0_4.load(data) 3279 if isinstance(m04, InvalidDescr): 3280 try: 3281 updated = _model_conv.convert_as_dict( 3282 m04 # pyright: ignore[reportArgumentType] 3283 ) 3284 except Exception as e: 3285 logger.error( 3286 "Failed to convert from invalid model 0.4 description." 3287 + f"\nerror: {e}" 3288 + "\nProceeding with model 0.5 validation without conversion." 3289 ) 3290 updated = None 3291 else: 3292 updated = _model_conv.convert_as_dict(m04) 3293 3294 if updated is not None: 3295 data.clear() 3296 data.update(updated) 3297 3298 elif fv_tuple[:2] == (0, 5): 3299 # bump patch version 3300 data["format_version"] = cls.implemented_format_version
Convert metadata following an older format version to this class's format without validating the result.
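A minimal sketch of converting a raw RDF dict in place (assuming this classmethod on bioimageio.spec.ModelDescr; the path is hypothetical and PyYAML is assumed to be available):

    import yaml
    from bioimageio.spec import ModelDescr

    with open("rdf.yaml") as f:  # hypothetical path to an older model RDF
        data = yaml.safe_load(f)

    ModelDescr.convert_from_old_format_wo_validation(data)  # mutates data in place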
Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].
337def init_private_attributes(self: BaseModel, context: Any, /) -> None: 338 """This function is meant to behave like a BaseModel method to initialise private attributes. 339 340 It takes context as an argument since that's what pydantic-core passes when calling it. 341 342 Args: 343 self: The BaseModel instance. 344 context: The context. 345 """ 346 if getattr(self, '__pydantic_private__', None) is None: 347 pydantic_private = {} 348 for name, private_attr in self.__private_attributes__.items(): 349 default = private_attr.get_default() 350 if default is not PydanticUndefined: 351 pydantic_private[name] = default 352 object_setattr(self, '__pydantic_private__', pydantic_private)
This function is meant to behave like a BaseModel method to initialise private attributes.
It takes context as an argument since that's what pydantic-core passes when calling it.
Arguments:
- self: The BaseModel instance.
- context: The context.
31class NotebookDescr(GenericDescrBase): 32 """Bioimage.io description of a Jupyter notebook.""" 33 34 implemented_type: ClassVar[Literal["notebook"]] = "notebook" 35 if TYPE_CHECKING: 36 type: Literal["notebook"] = "notebook" 37 else: 38 type: Literal["notebook"] 39 40 id: Optional[NotebookId] = None 41 """bioimage.io-wide unique resource identifier 42 assigned by bioimage.io; version **un**specific.""" 43 44 parent: Optional[NotebookId] = None 45 """The description from which this one is derived""" 46 47 source: NotebookSource 48 """The Jupyter notebook"""
Bioimage.io description of a Jupyter notebook.
bioimage.io-wide unique resource identifier assigned by bioimage.io; version unspecific.
The description from which this one is derived
The Jupyter notebook
Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].
Inherited Members
- bioimageio.spec.generic.v0_3.GenericDescrBase
- implemented_format_version
- convert_from_old_format_wo_validation
- documentation
- badges
- config
- bioimageio.spec.generic.v0_3.GenericModelDescrBase
- name
- description
- covers
- id_emoji
- attachments
- cite
- license
- git_repo
- icon
- links
- uploader
- maintainers
- warn_about_tag_categories
- version
- version_comment
150def save_bioimageio_package_as_folder( 151 source: Union[BioimageioYamlSource, ResourceDescr], 152 /, 153 *, 154 output_path: Union[NewPath, DirectoryPath, None] = None, 155 weights_priority_order: Optional[ # model only 156 Sequence[ 157 Literal[ 158 "keras_hdf5", 159 "onnx", 160 "pytorch_state_dict", 161 "tensorflow_js", 162 "tensorflow_saved_model_bundle", 163 "torchscript", 164 ] 165 ] 166 ] = None, 167) -> DirectoryPath: 168 """Write the content of a bioimage.io resource package to a folder. 169 170 Args: 171 source: bioimageio resource description 172 output_path: file path to write package to 173 weights_priority_order: If given only the first weights format present in the model is included. 174 If none of the prioritized weights formats is found all are included. 175 176 Returns: 177 directory path to bioimageio package folder 178 """ 179 package_content = _prepare_resource_package( 180 source, 181 weights_priority_order=weights_priority_order, 182 ) 183 if output_path is None: 184 output_path = Path(mkdtemp()) 185 else: 186 output_path = Path(output_path) 187 188 output_path.mkdir(exist_ok=True, parents=True) 189 for name, src in package_content.items(): 190 if isinstance(src, collections.abc.Mapping): 191 write_yaml(src, output_path / name) 192 elif ( 193 isinstance(src.original_root, Path) 194 and src.original_root / src.original_file_name 195 == (output_path / name).resolve() 196 ): 197 logger.debug( 198 f"Not copying {src.original_root / src.original_file_name} to itself." 199 ) 200 else: 201 if isinstance(src.original_root, Path): 202 logger.debug( 203 f"Copying from path {src.original_root / src.original_file_name} to {output_path / name}." 204 ) 205 else: 206 logger.debug( 207 f"Copying {src.original_root}/{src.original_file_name} to {output_path / name}." 208 ) 209 with (output_path / name).open("wb") as dest: 210 _ = shutil.copyfileobj(src, dest) 211 212 return output_path
Write the content of a bioimage.io resource package to a folder.
Arguments:
- source: bioimageio resource description
- output_path: file path to write package to
- weights_priority_order: If given, only the first weights format present in the model is included. If none of the prioritized weights formats is found, all are included.
Returns:
directory path to bioimageio package folder
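A minimal usage sketch (paths are hypothetical):

    from bioimageio.spec import save_bioimageio_package_as_folder

    folder = save_bioimageio_package_as_folder(
        "rdf.yaml",                      # hypothetical bioimageio.yaml source
        output_path="unpacked_package",  # created if it does not exist
    )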
279def save_bioimageio_package_to_stream( 280 source: Union[BioimageioYamlSource, ResourceDescr], 281 /, 282 *, 283 compression: int = ZIP_DEFLATED, 284 compression_level: int = 1, 285 output_stream: Union[IO[bytes], None] = None, 286 weights_priority_order: Optional[ # model only 287 Sequence[ 288 Literal[ 289 "keras_hdf5", 290 "onnx", 291 "pytorch_state_dict", 292 "tensorflow_js", 293 "tensorflow_saved_model_bundle", 294 "torchscript", 295 ] 296 ] 297 ] = None, 298) -> IO[bytes]: 299 """Package a bioimageio resource into a stream. 300 301 Args: 302 rd: bioimageio resource description 303 compression: The numeric constant of compression method. 304 compression_level: Compression level to use when writing files to the archive. 305 See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile 306 output_stream: stream to write package to 307 weights_priority_order: If given only the first weights format present in the model is included. 308 If none of the prioritized weights formats is found all are included. 309 310 Note: this function bypasses safety checks and does not load/validate the model after writing. 311 312 Returns: 313 stream of zipped bioimageio package 314 """ 315 if output_stream is None: 316 output_stream = BytesIO() 317 318 package_content = _prepare_resource_package( 319 source, 320 weights_priority_order=weights_priority_order, 321 ) 322 323 write_zip( 324 output_stream, 325 package_content, 326 compression=compression, 327 compression_level=compression_level, 328 ) 329 330 return output_stream
Package a bioimageio resource into a stream.
Arguments:
- source: bioimageio resource description
- compression: The numeric constant of the compression method (default: zipfile.ZIP_DEFLATED).
- compression_level: Compression level to use when writing files to the archive. See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile
- output_stream: stream to write package to
- weights_priority_order: If given, only the first weights format present in the model is included. If none of the prioritized weights formats is found, all are included.
Note: this function bypasses safety checks and does not load/validate the model after writing.
Returns:
stream of zipped bioimageio package
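A minimal usage sketch (the source path is hypothetical):

    from bioimageio.spec import save_bioimageio_package_to_stream

    stream = save_bioimageio_package_to_stream("rdf.yaml")  # defaults to an in-memory BytesIO
    _ = stream.seek(0)            # rewind before reading the zipped bytes
    zipped_bytes = stream.read()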
215def save_bioimageio_package( 216 source: Union[BioimageioYamlSource, ResourceDescr], 217 /, 218 *, 219 compression: int = ZIP_DEFLATED, 220 compression_level: int = 1, 221 output_path: Union[NewPath, FilePath, None] = None, 222 weights_priority_order: Optional[ # model only 223 Sequence[ 224 Literal[ 225 "keras_hdf5", 226 "onnx", 227 "pytorch_state_dict", 228 "tensorflow_js", 229 "tensorflow_saved_model_bundle", 230 "torchscript", 231 ] 232 ] 233 ] = None, 234 allow_invalid: bool = False, 235) -> FilePath: 236 """Package a bioimageio resource as a zip file. 237 238 Args: 239 rd: bioimageio resource description 240 compression: The numeric constant of compression method. 241 compression_level: Compression level to use when writing files to the archive. 242 See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile 243 output_path: file path to write package to 244 weights_priority_order: If given only the first weights format present in the model is included. 245 If none of the prioritized weights formats is found all are included. 246 247 Returns: 248 path to zipped bioimageio package 249 """ 250 package_content = _prepare_resource_package( 251 source, 252 weights_priority_order=weights_priority_order, 253 ) 254 if output_path is None: 255 output_path = Path( 256 NamedTemporaryFile(suffix=".bioimageio.zip", delete=False).name 257 ) 258 else: 259 output_path = Path(output_path) 260 261 write_zip( 262 output_path, 263 package_content, 264 compression=compression, 265 compression_level=compression_level, 266 ) 267 with get_validation_context().replace(warning_level=ERROR): 268 if isinstance((exported := load_description(output_path)), InvalidDescr): 269 exported.validation_summary.display() 270 msg = f"Exported package at '{output_path}' is invalid." 271 if allow_invalid: 272 logger.error(msg) 273 else: 274 raise ValueError(msg) 275 276 return output_path
Package a bioimageio resource as a zip file.
Arguments:
- source: bioimageio resource description
- compression: The numeric constant of the compression method (default: zipfile.ZIP_DEFLATED).
- compression_level: Compression level to use when writing files to the archive. See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile
- output_path: file path to write package to
- weights_priority_order: If given, only the first weights format present in the model is included. If none of the prioritized weights formats is found, all are included.
- allow_invalid: If True, log an error instead of raising a ValueError when the exported package fails validation.
Returns:
path to zipped bioimageio package
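A minimal usage sketch (paths are hypothetical):

    from bioimageio.spec import save_bioimageio_package

    zip_path = save_bioimageio_package(
        "rdf.yaml",                                      # hypothetical source
        output_path="model.bioimageio.zip",
        weights_priority_order=("onnx", "torchscript"),  # keep only the first format found
    )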
213def save_bioimageio_yaml_only( 214 rd: Union[ResourceDescr, BioimageioYamlContent, InvalidDescr], 215 /, 216 file: Union[NewPath, FilePath, TextIO], 217 *, 218 exclude_unset: bool = True, 219 exclude_defaults: bool = False, 220): 221 """write the metadata of a resource description (`rd`) to `file` 222 without writing any of the referenced files in it. 223 224 Args: 225 rd: bioimageio resource description 226 file: file or stream to save to 227 exclude_unset: Exclude fields that have not explicitly be set. 228 exclude_defaults: Exclude fields that have the default value (even if set explicitly). 229 230 Note: To save a resource description with its associated files as a package, 231 use `save_bioimageio_package` or `save_bioimageio_package_as_folder`. 232 """ 233 if isinstance(rd, ResourceDescrBase): 234 content = dump_description( 235 rd, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults 236 ) 237 else: 238 content = rd 239 240 write_yaml(cast(YamlValue, content), file)
Write the metadata of a resource description (rd) to file without writing any of the referenced files in it.
Arguments:
- rd: bioimageio resource description
- file: file or stream to save to
- exclude_unset: Exclude fields that have not been explicitly set.
- exclude_defaults: Exclude fields that have the default value (even if set explicitly).
Note: To save a resource description with its associated files as a package, use save_bioimageio_package or save_bioimageio_package_as_folder.
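A minimal usage sketch (paths are hypothetical):

    from pathlib import Path
    from bioimageio.spec import load_description, save_bioimageio_yaml_only

    rd = load_description("rdf.yaml")  # hypothetical source
    save_bioimageio_yaml_only(rd, file=Path("copy.bioimageio.yaml"))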
269def update_format( 270 source: Union[ 271 ResourceDescr, 272 PermissiveFileSource, 273 ZipFile, 274 BioimageioYamlContent, 275 InvalidDescr, 276 ], 277 /, 278 *, 279 output: Union[Path, TextIO, None] = None, 280 exclude_defaults: bool = True, 281 perform_io_checks: Optional[bool] = None, 282) -> Union[LatestResourceDescr, InvalidDescr]: 283 """Update a resource description. 284 285 Notes: 286 - Invalid **source** descriptions may fail to update. 287 - The updated description might be invalid (even if the **source** was valid). 288 """ 289 290 if isinstance(source, ResourceDescrBase): 291 root = source.root 292 source = dump_description(source) 293 else: 294 root = None 295 296 if isinstance(source, collections.abc.Mapping): 297 descr = build_description( 298 source, 299 context=get_validation_context().replace( 300 root=root, perform_io_checks=perform_io_checks 301 ), 302 format_version=LATEST, 303 ) 304 305 else: 306 descr = load_description( 307 source, 308 perform_io_checks=perform_io_checks, 309 format_version=LATEST, 310 ) 311 312 if output is not None: 313 save_bioimageio_yaml_only(descr, file=output, exclude_defaults=exclude_defaults) 314 315 return descr
Update a resource description to the latest format version.
Notes:
- Invalid source descriptions may fail to update.
- The updated description might be invalid (even if the source was valid).
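A minimal usage sketch (paths are hypothetical):

    from pathlib import Path
    from bioimageio.spec import update_format

    updated = update_format("rdf.yaml", output=Path("rdf_latest.yaml"))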
318def update_hashes( 319 source: Union[PermissiveFileSource, ZipFile, ResourceDescr, BioimageioYamlContent], 320 /, 321) -> Union[ResourceDescr, InvalidDescr]: 322 """Update hash values of the files referenced in **source**.""" 323 if isinstance(source, ResourceDescrBase): 324 root = source.root 325 source = dump_description(source) 326 else: 327 root = None 328 329 context = get_validation_context().replace( 330 update_hashes=True, root=root, perform_io_checks=True 331 ) 332 with context: 333 if isinstance(source, collections.abc.Mapping): 334 return build_description(source) 335 else: 336 return load_description(source, perform_io_checks=True)
Update hash values of the files referenced in source.
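A minimal usage sketch (the source path is hypothetical; note that update_hashes always performs IO checks to recompute the SHA256 values):

    from bioimageio.spec import update_hashes

    descr = update_hashes("rdf.yaml")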
28def upload( 29 source: Union[PermissiveFileSource, ZipFile, ResourceDescr, BioimageioYamlContent], 30 /, 31) -> HttpUrl: 32 """Upload a new resource description (version) to the hypha server to be shared at bioimage.io. 33 To edit an existing resource **version**, please login to https://bioimage.io and use the web interface. 34 35 WARNING: This upload function is in alpha stage and might change in the future. 36 37 Args: 38 source: The resource description to upload. 39 40 Returns: 41 A URL to the uploaded resource description. 42 Note: It might take some time until the resource is processed and available for download from the returned URL. 43 """ 44 45 if settings.hypha_upload_token is None: 46 raise ValueError( 47 """ 48Upload token is not set. Please set BIOIMAGEIO_HYPHA_UPLOAD_TOKEN in your environment variables. 49By setting this token you agree to our terms of service at https://bioimage.io/#/toc. 50 51How to obtain a token: 52 1. Login to https://bioimage.io 53 2. Generate a new token at https://bioimage.io/#/api?tab=hypha-rpc 54""" 55 ) 56 57 if isinstance(source, ResourceDescrBase): 58 # If source is already a ResourceDescr, we can use it directly 59 descr = source 60 elif isinstance(source, dict): 61 descr = build_description(source) 62 else: 63 descr = load_description(source) 64 65 if isinstance(descr, InvalidDescr): 66 raise ValueError("Uploading invalid resource descriptions is not allowed.") 67 68 if descr.type != "model": 69 raise NotImplementedError( 70 f"For now, only model resources can be uploaded (got type={descr.type})." 71 ) 72 73 if descr.id is not None: 74 raise ValueError( 75 "You cannot upload a resource with an id. Please remove the id from the description and make sure to upload a new non-existing resource. To edit an existing resource, please use the web interface at https://bioimage.io." 
76 ) 77 78 content = get_resource_package_content(descr) 79 80 metadata = content[BIOIMAGEIO_YAML] 81 assert isinstance(metadata, dict) 82 manifest = dict(metadata) 83 84 # only admins can upload a resource with a version 85 artifact_version = "stage" # if descr.version is None else str(descr.version) 86 87 # Create new model 88 r = httpx.post( 89 settings.hypha_upload, 90 json={ 91 "parent_id": "bioimage-io/bioimage.io", 92 "alias": ( 93 descr.id or "{animal_adjective}-{animal}" 94 ), # TODO: adapt for non-model uploads, 95 "type": descr.type, 96 "manifest": manifest, 97 "version": artifact_version, 98 }, 99 headers=( 100 headers := { 101 "Authorization": f"Bearer {settings.hypha_upload_token}", 102 "Content-Type": "application/json", 103 } 104 ), 105 ) 106 107 response = r.json() 108 artifact_id = response.get("id") 109 if artifact_id is None: 110 try: 111 logger.error("Response detail: {}", "".join(response["detail"])) 112 except Exception: 113 logger.error("Response: {}", response) 114 115 raise RuntimeError(f"Upload did not return resource id: {response}") 116 else: 117 logger.info("Uploaded resource description {}", artifact_id) 118 119 for file_name, file_source in content.items(): 120 # Get upload URL for a file 121 response = httpx.post( 122 settings.hypha_upload.replace("/create", "/put_file"), 123 json={ 124 "artifact_id": artifact_id, 125 "file_path": file_name, 126 }, 127 headers=headers, 128 follow_redirects=True, 129 ) 130 upload_url = response.raise_for_status().json() 131 132 # Upload file to the provided URL 133 if isinstance(file_source, collections.abc.Mapping): 134 buf = io.BytesIO() 135 write_yaml(file_source, buf) 136 files = {file_name: buf} 137 else: 138 files = {file_name: get_reader(file_source)} 139 140 response = httpx.put( 141 upload_url, 142 files=files, # pyright: ignore[reportArgumentType] 143 # TODO: follow up on https://github.com/encode/httpx/discussions/3611 144 headers={"Content-Type": ""}, # Important for S3 uploads 145 follow_redirects=True, 146 ) 147 logger.info("Uploaded '{}' successfully", file_name) 148 149 # Update model status 150 manifest["status"] = "request-review" 151 response = httpx.post( 152 settings.hypha_upload.replace("/create", "/edit"), 153 json={ 154 "artifact_id": artifact_id, 155 "version": artifact_version, 156 "manifest": manifest, 157 }, 158 headers=headers, 159 follow_redirects=True, 160 ) 161 logger.info( 162 "Updated status of {}/{} to 'request-review'", artifact_id, artifact_version 163 ) 164 logger.warning( 165 "Upload successfull. Please note that the uploaded resource might not be available for download immediately." 166 ) 167 with get_validation_context().replace(perform_io_checks=False): 168 return HttpUrl( 169 f"https://hypha.aicell.io/bioimage-io/artifacts/{artifact_id}/files/rdf.yaml?version={artifact_version}" 170 )
Upload a new resource description (version) to the hypha server to be shared at bioimage.io. To edit an existing resource version, please log in to https://bioimage.io and use the web interface.
WARNING: This upload function is in an alpha stage and might change in the future.
Arguments:
- source: The resource description to upload.
Returns:
A URL to the uploaded resource description. Note: It might take some time until the resource is processed and available for download from the returned URL.
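A minimal usage sketch (assuming BIOIMAGEIO_HYPHA_UPLOAD_TOKEN is set in the environment; the source path is hypothetical):

    from bioimageio.spec import upload

    url = upload("rdf.yaml")  # a model description without an `id` field
    print(url)  # the resource may take a while to become available here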
212def validate_format( 213 data: BioimageioYamlContent, 214 /, 215 *, 216 format_version: Union[Literal["discover", "latest"], str] = DISCOVER, 217 context: Optional[ValidationContext] = None, 218) -> ValidationSummary: 219 """Validate a dictionary holding a bioimageio description. 220 See `bioimagieo.spec.load_description_and_validate_format_only` 221 to validate a file source. 222 223 Args: 224 data: Dictionary holding the raw bioimageio.yaml content. 225 format_version: 226 Format version to (update to and) use for validation. 227 Note: 228 - Use "latest" to convert to the latest available format version. 229 - Use "discover" to use the format version specified in the RDF. 230 - Only considers major.minor format version, ignores patch version. 231 - Conversion to lower format versions is not supported. 232 context: Validation context, see `bioimagieo.spec.ValidationContext` 233 234 Note: 235 Use `bioimagieo.spec.load_description_and_validate_format_only` to validate a 236 file source instead of loading the YAML content and creating the appropriate 237 `ValidationContext`. 238 239 Alternatively you can use `bioimagieo.spec.load_description` and access the 240 `validation_summary` attribute of the returned object. 241 """ 242 with context or get_validation_context(): 243 rd = build_description(data, format_version=format_version) 244 245 assert rd.validation_summary is not None 246 return rd.validation_summary
Validate a dictionary holding a bioimageio description.
See bioimageio.spec.load_description_and_validate_format_only to validate a file source.
Arguments:
- data: Dictionary holding the raw bioimageio.yaml content.
- format_version: Format version to (update to and) use for validation.
  Note:
  - Use "latest" to convert to the latest available format version.
  - Use "discover" to use the format version specified in the RDF.
  - Only the major.minor format version is considered; the patch version is ignored.
  - Conversion to lower format versions is not supported.
- context: Validation context, see bioimageio.spec.ValidationContext.
Note:
Use bioimageio.spec.load_description_and_validate_format_only to validate a file source instead of loading the YAML content and creating the appropriate ValidationContext. Alternatively, you can use bioimageio.spec.load_description and access the validation_summary attribute of the returned object.
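A minimal usage sketch (the path is hypothetical and PyYAML is assumed to be available):

    import yaml
    from bioimageio.spec import validate_format

    with open("rdf.yaml") as f:
        data = yaml.safe_load(f)

    summary = validate_format(data, format_version="discover")
    summary.display()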
60@dataclass(frozen=True) 61class ValidationContext(ValidationContextBase): 62 """A validation context used to control validation of bioimageio resources. 63 64 For example a relative file path in a bioimageio description requires the **root** 65 context to evaluate if the file is available and, if **perform_io_checks** is true, 66 if it matches its expected SHA256 hash value. 67 """ 68 69 _context_tokens: "List[Token[Optional[ValidationContext]]]" = field( 70 init=False, 71 default_factory=cast( 72 "Callable[[], List[Token[Optional[ValidationContext]]]]", list 73 ), 74 ) 75 76 cache: Union[ 77 DiskCache[RootHttpUrl], MemoryCache[RootHttpUrl], NoopCache[RootHttpUrl] 78 ] = field(default=settings.disk_cache) 79 disable_cache: bool = False 80 """Disable caching downloads to `settings.cache_path` 81 and (re)download them to memory instead.""" 82 83 root: Union[RootHttpUrl, DirectoryPath, ZipFile] = Path() 84 """Url/directory/archive serving as base to resolve any relative file paths.""" 85 86 warning_level: WarningLevel = 50 87 """Treat warnings of severity `s` as validation errors if `s >= warning_level`.""" 88 89 log_warnings: bool = settings.log_warnings 90 """If `True` warnings are logged to the terminal 91 92 Note: This setting does not affect warning entries 93 of a generated `bioimageio.spec.ValidationSummary`. 94 """ 95 96 progressbar: Union[None, bool, Callable[[], Progressbar]] = None 97 """Control any progressbar. 98 (Currently this is only used for file downloads.) 99 100 Can be: 101 - `None`: use a default tqdm progressbar (if not settings.CI) 102 - `True`: use a default tqdm progressbar 103 - `False`: disable the progressbar 104 - `callable`: A callable that returns a tqdm-like progressbar. 105 """ 106 107 raise_errors: bool = False 108 """Directly raise any validation errors 109 instead of aggregating errors and returning a `bioimageio.spec.InvalidDescr`. 
(for debugging)""" 110 111 @property 112 def summary(self): 113 if isinstance(self.root, ZipFile): 114 if self.root.filename is None: 115 root = "in-memory" 116 else: 117 root = Path(self.root.filename) 118 else: 119 root = self.root 120 121 return ValidationContextSummary( 122 root=root, 123 file_name=self.file_name, 124 perform_io_checks=self.perform_io_checks, 125 known_files=copy(self.known_files), 126 update_hashes=self.update_hashes, 127 ) 128 129 def __enter__(self): 130 self._context_tokens.append(_validation_context_var.set(self)) 131 return self 132 133 def __exit__(self, type, value, traceback): # type: ignore 134 _validation_context_var.reset(self._context_tokens.pop(-1)) 135 136 def replace( # TODO: probably use __replace__ when py>=3.13 137 self, 138 root: Optional[Union[RootHttpUrl, DirectoryPath, ZipFile]] = None, 139 warning_level: Optional[WarningLevel] = None, 140 log_warnings: Optional[bool] = None, 141 file_name: Optional[str] = None, 142 perform_io_checks: Optional[bool] = None, 143 known_files: Optional[Dict[str, Optional[Sha256]]] = None, 144 raise_errors: Optional[bool] = None, 145 update_hashes: Optional[bool] = None, 146 original_source_name: Optional[str] = None, 147 ) -> Self: 148 if known_files is None and root is not None and self.root != root: 149 # reset known files if root changes, but no new known_files are given 150 known_files = {} 151 152 return self.__class__( 153 root=self.root if root is None else root, 154 warning_level=( 155 self.warning_level if warning_level is None else warning_level 156 ), 157 log_warnings=self.log_warnings if log_warnings is None else log_warnings, 158 file_name=self.file_name if file_name is None else file_name, 159 perform_io_checks=( 160 self.perform_io_checks 161 if perform_io_checks is None 162 else perform_io_checks 163 ), 164 known_files=self.known_files if known_files is None else known_files, 165 raise_errors=self.raise_errors if raise_errors is None else raise_errors, 166 update_hashes=( 167 self.update_hashes if update_hashes is None else update_hashes 168 ), 169 original_source_name=( 170 self.original_source_name 171 if original_source_name is None 172 else original_source_name 173 ), 174 ) 175 176 @property 177 def source_name(self) -> str: 178 if self.original_source_name is not None: 179 return self.original_source_name 180 elif self.file_name is None: 181 return "in-memory" 182 else: 183 try: 184 if isinstance(self.root, Path): 185 source = (self.root / self.file_name).absolute() 186 else: 187 parsed = urlsplit(str(self.root)) 188 path = list(parsed.path.strip("/").split("/")) + [self.file_name] 189 source = urlunsplit( 190 ( 191 parsed.scheme, 192 parsed.netloc, 193 "/".join(path), 194 parsed.query, 195 parsed.fragment, 196 ) 197 ) 198 except ValueError: 199 return self.file_name 200 else: 201 return str(source)
A validation context used to control validation of bioimageio resources.
For example, a relative file path in a bioimageio description requires the root context to evaluate whether the file is available and, if perform_io_checks is true, whether it matches its expected SHA256 hash value.
Disable caching downloads to settings.cache_path and (re)download them to memory instead.
Url/directory/archive serving as base to resolve any relative file paths.
Treat warnings of severity s as validation errors if s >= warning_level.
If True, warnings are logged to the terminal.
Note: This setting does not affect warning entries of a generated bioimageio.spec.ValidationSummary.
Control any progressbar. (Currently this is only used for file downloads.)
Can be:
- None: use a default tqdm progressbar (if not settings.CI)
- True: use a default tqdm progressbar
- False: disable the progressbar
- callable: a callable that returns a tqdm-like progressbar
Directly raise any validation errors instead of aggregating errors and returning a bioimageio.spec.InvalidDescr (for debugging).
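A minimal usage sketch (the source path is hypothetical; perform_io_checks is inherited from the base context):

    from bioimageio.spec import ValidationContext, load_description

    with ValidationContext(perform_io_checks=False, raise_errors=True):
        rd = load_description("rdf.yaml")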
111 @property 112 def summary(self): 113 if isinstance(self.root, ZipFile): 114 if self.root.filename is None: 115 root = "in-memory" 116 else: 117 root = Path(self.root.filename) 118 else: 119 root = self.root 120 121 return ValidationContextSummary( 122 root=root, 123 file_name=self.file_name, 124 perform_io_checks=self.perform_io_checks, 125 known_files=copy(self.known_files), 126 update_hashes=self.update_hashes, 127 )
136 def replace( # TODO: probably use __replace__ when py>=3.13 137 self, 138 root: Optional[Union[RootHttpUrl, DirectoryPath, ZipFile]] = None, 139 warning_level: Optional[WarningLevel] = None, 140 log_warnings: Optional[bool] = None, 141 file_name: Optional[str] = None, 142 perform_io_checks: Optional[bool] = None, 143 known_files: Optional[Dict[str, Optional[Sha256]]] = None, 144 raise_errors: Optional[bool] = None, 145 update_hashes: Optional[bool] = None, 146 original_source_name: Optional[str] = None, 147 ) -> Self: 148 if known_files is None and root is not None and self.root != root: 149 # reset known files if root changes, but no new known_files are given 150 known_files = {} 151 152 return self.__class__( 153 root=self.root if root is None else root, 154 warning_level=( 155 self.warning_level if warning_level is None else warning_level 156 ), 157 log_warnings=self.log_warnings if log_warnings is None else log_warnings, 158 file_name=self.file_name if file_name is None else file_name, 159 perform_io_checks=( 160 self.perform_io_checks 161 if perform_io_checks is None 162 else perform_io_checks 163 ), 164 known_files=self.known_files if known_files is None else known_files, 165 raise_errors=self.raise_errors if raise_errors is None else raise_errors, 166 update_hashes=( 167 self.update_hashes if update_hashes is None else update_hashes 168 ), 169 original_source_name=( 170 self.original_source_name 171 if original_source_name is None 172 else original_source_name 173 ), 174 )
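A minimal sketch deriving a modified context from the current one (field values are illustrative):

    from bioimageio.spec import get_validation_context

    with get_validation_context().replace(perform_io_checks=True, raise_errors=True):
        ...  # validation running under the derived context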
176 @property 177 def source_name(self) -> str: 178 if self.original_source_name is not None: 179 return self.original_source_name 180 elif self.file_name is None: 181 return "in-memory" 182 else: 183 try: 184 if isinstance(self.root, Path): 185 source = (self.root / self.file_name).absolute() 186 else: 187 parsed = urlsplit(str(self.root)) 188 path = list(parsed.path.strip("/").split("/")) + [self.file_name] 189 source = urlunsplit( 190 ( 191 parsed.scheme, 192 parsed.netloc, 193 "/".join(path), 194 parsed.query, 195 parsed.fragment, 196 ) 197 ) 198 except ValueError: 199 return self.file_name 200 else: 201 return str(source)
243class ValidationSummary(BaseModel, extra="allow"): 244 """Summarizes output of all bioimageio validations and tests 245 for one specific `ResourceDescr` instance.""" 246 247 name: str 248 """Name of the validation""" 249 source_name: str 250 """Source of the validated bioimageio description""" 251 id: Optional[str] = None 252 """ID of the resource being validated""" 253 type: str 254 """Type of the resource being validated""" 255 format_version: str 256 """Format version of the resource being validated""" 257 status: Literal["passed", "valid-format", "failed"] 258 """overall status of the bioimageio validation""" 259 metadata_completeness: Annotated[float, annotated_types.Interval(ge=0, le=1)] = 0.0 260 """Estimate of completeness of the metadata in the resource description. 261 262 Note: This completeness estimate may change with subsequent releases 263 and should be considered bioimageio.spec version specific. 264 """ 265 266 details: List[ValidationDetail] 267 """List of validation details""" 268 env: Set[InstalledPackage] = Field( 269 default_factory=lambda: { 270 InstalledPackage( 271 name="bioimageio.spec", 272 version=VERSION, 273 ) 274 } 275 ) 276 """List of selected, relevant package versions""" 277 278 saved_conda_list: Optional[str] = None 279 280 @field_serializer("saved_conda_list") 281 def _save_conda_list(self, value: Optional[str]): 282 return self.conda_list 283 284 @property 285 def conda_list(self): 286 if self.saved_conda_list is None: 287 p = subprocess.run( 288 [CONDA_CMD, "list"], 289 stdout=subprocess.PIPE, 290 stderr=subprocess.STDOUT, 291 shell=False, 292 text=True, 293 ) 294 self.saved_conda_list = ( 295 p.stdout or f"`conda list` exited with {p.returncode}" 296 ) 297 298 return self.saved_conda_list 299 300 @property 301 def status_icon(self): 302 if self.status == "passed": 303 return "✔️" 304 elif self.status == "valid-format": 305 return "🟡" 306 else: 307 return "❌" 308 309 @property 310 def errors(self) -> List[ErrorEntry]: 311 return list(chain.from_iterable(d.errors for d in self.details)) 312 313 @property 314 def warnings(self) -> List[WarningEntry]: 315 return list(chain.from_iterable(d.warnings for d in self.details)) 316 317 def format( 318 self, 319 *, 320 width: Optional[int] = None, 321 include_conda_list: bool = False, 322 ): 323 """Format summary as Markdown string""" 324 return self._format( 325 width=width, target="md", include_conda_list=include_conda_list 326 ) 327 328 format_md = format 329 330 def format_html( 331 self, 332 *, 333 width: Optional[int] = None, 334 include_conda_list: bool = False, 335 ): 336 md_with_html = self._format( 337 target="html", width=width, include_conda_list=include_conda_list 338 ) 339 return markdown.markdown( 340 md_with_html, extensions=["tables", "fenced_code", "nl2br"] 341 ) 342 343 def display( 344 self, 345 *, 346 width: Optional[int] = None, 347 include_conda_list: bool = False, 348 tab_size: int = 4, 349 soft_wrap: bool = True, 350 ) -> None: 351 try: # render as HTML in Jupyter notebook 352 from IPython.core.getipython import get_ipython 353 from IPython.display import ( 354 display_html, # pyright: ignore[reportUnknownVariableType] 355 ) 356 except ImportError: 357 pass 358 else: 359 if get_ipython() is not None: 360 _ = display_html( 361 self.format_html( 362 width=width, include_conda_list=include_conda_list 363 ), 364 raw=True, 365 ) 366 return 367 368 # render with rich 369 _ = self._format( 370 target=rich.console.Console( 371 width=width, 372 tab_size=tab_size, 373 soft_wrap=soft_wrap, 374 ), 
375 width=width, 376 include_conda_list=include_conda_list, 377 ) 378 379 def add_detail(self, detail: ValidationDetail): 380 if detail.status == "failed": 381 self.status = "failed" 382 elif detail.status != "passed": 383 assert_never(detail.status) 384 385 self.details.append(detail) 386 387 def log( 388 self, 389 to: Union[Literal["display"], Path, Sequence[Union[Literal["display"], Path]]], 390 ) -> List[Path]: 391 """Convenience method to display the validation summary in the terminal and/or 392 save it to disk. See `save` for details.""" 393 if to == "display": 394 display = True 395 save_to = [] 396 elif isinstance(to, Path): 397 display = False 398 save_to = [to] 399 else: 400 display = "display" in to 401 save_to = [p for p in to if p != "display"] 402 403 if display: 404 self.display() 405 406 return self.save(save_to) 407 408 def save( 409 self, path: Union[Path, Sequence[Path]] = Path("{id}_summary_{now}") 410 ) -> List[Path]: 411 """Save the validation/test summary in JSON, Markdown or HTML format. 412 413 Returns: 414 List of file paths the summary was saved to. 415 416 Notes: 417 - Format is chosen based on the suffix: `.json`, `.md`, `.html`. 418 - If **path** has no suffix it is assumed to be a direcotry to which a 419 `summary.json`, `summary.md` and `summary.html` are saved to. 420 """ 421 if isinstance(path, (str, Path)): 422 path = [Path(path)] 423 424 # folder to file paths 425 file_paths: List[Path] = [] 426 for p in path: 427 if p.suffix: 428 file_paths.append(p) 429 else: 430 file_paths.extend( 431 [ 432 p / "summary.json", 433 p / "summary.md", 434 p / "summary.html", 435 ] 436 ) 437 438 now = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ") 439 for p in file_paths: 440 p = Path(str(p).format(id=self.id or "bioimageio", now=now)) 441 if p.suffix == ".json": 442 self.save_json(p) 443 elif p.suffix == ".md": 444 self.save_markdown(p) 445 elif p.suffix == ".html": 446 self.save_html(p) 447 else: 448 raise ValueError(f"Unknown summary path suffix '{p.suffix}'") 449 450 return file_paths 451 452 def save_json( 453 self, path: Path = Path("summary.json"), *, indent: Optional[int] = 2 454 ): 455 """Save validation/test summary as JSON file.""" 456 json_str = self.model_dump_json(indent=indent) 457 path.parent.mkdir(exist_ok=True, parents=True) 458 _ = path.write_text(json_str, encoding="utf-8") 459 logger.info("Saved summary to {}", path.absolute()) 460 461 def save_markdown(self, path: Path = Path("summary.md")): 462 """Save rendered validation/test summary as Markdown file.""" 463 formatted = self.format_md() 464 path.parent.mkdir(exist_ok=True, parents=True) 465 _ = path.write_text(formatted, encoding="utf-8") 466 logger.info("Saved Markdown formatted summary to {}", path.absolute()) 467 468 def save_html(self, path: Path = Path("summary.html")) -> None: 469 """Save rendered validation/test summary as HTML file.""" 470 path.parent.mkdir(exist_ok=True, parents=True) 471 472 html = self.format_html() 473 _ = path.write_text(html, encoding="utf-8") 474 logger.info("Saved HTML formatted summary to {}", path.absolute()) 475 476 @classmethod 477 def load_json(cls, path: Path) -> Self: 478 """Load validation/test summary from a suitable JSON file""" 479 json_str = Path(path).read_text(encoding="utf-8") 480 return cls.model_validate_json(json_str) 481 482 @field_validator("env", mode="before") 483 def _convert_dict(cls, value: List[Union[List[str], Dict[str, str]]]): 484 """convert old env value for backwards compatibility""" 485 if isinstance(value, list): 486 return [ 
487 ( 488 (v["name"], v["version"], v.get("build", ""), v.get("channel", "")) 489 if isinstance(v, dict) and "name" in v and "version" in v 490 else v 491 ) 492 for v in value 493 ] 494 else: 495 return value 496 497 def _format( 498 self, 499 *, 500 target: Union[rich.console.Console, Literal["html", "md"]], 501 width: Optional[int], 502 include_conda_list: bool, 503 ): 504 return _format_summary( 505 self, 506 target=target, 507 width=width or 100, 508 include_conda_list=include_conda_list, 509 )
Summarizes output of all bioimageio validations and tests for one specific ResourceDescr instance.
Estimate of completeness of the metadata in the resource description.
Note: This completeness estimate may change with subsequent releases and should be considered bioimageio.spec version specific.
284 @property 285 def conda_list(self): 286 if self.saved_conda_list is None: 287 p = subprocess.run( 288 [CONDA_CMD, "list"], 289 stdout=subprocess.PIPE, 290 stderr=subprocess.STDOUT, 291 shell=False, 292 text=True, 293 ) 294 self.saved_conda_list = ( 295 p.stdout or f"`conda list` exited with {p.returncode}" 296 ) 297 298 return self.saved_conda_list
317 def format( 318 self, 319 *, 320 width: Optional[int] = None, 321 include_conda_list: bool = False, 322 ): 323 """Format summary as Markdown string""" 324 return self._format( 325 width=width, target="md", include_conda_list=include_conda_list 326 )
Format summary as Markdown string
317 def format( 318 self, 319 *, 320 width: Optional[int] = None, 321 include_conda_list: bool = False, 322 ): 323 """Format summary as Markdown string""" 324 return self._format( 325 width=width, target="md", include_conda_list=include_conda_list 326 )
Format summary as Markdown string
330 def format_html( 331 self, 332 *, 333 width: Optional[int] = None, 334 include_conda_list: bool = False, 335 ): 336 md_with_html = self._format( 337 target="html", width=width, include_conda_list=include_conda_list 338 ) 339 return markdown.markdown( 340 md_with_html, extensions=["tables", "fenced_code", "nl2br"] 341 )
343 def display( 344 self, 345 *, 346 width: Optional[int] = None, 347 include_conda_list: bool = False, 348 tab_size: int = 4, 349 soft_wrap: bool = True, 350 ) -> None: 351 try: # render as HTML in Jupyter notebook 352 from IPython.core.getipython import get_ipython 353 from IPython.display import ( 354 display_html, # pyright: ignore[reportUnknownVariableType] 355 ) 356 except ImportError: 357 pass 358 else: 359 if get_ipython() is not None: 360 _ = display_html( 361 self.format_html( 362 width=width, include_conda_list=include_conda_list 363 ), 364 raw=True, 365 ) 366 return 367 368 # render with rich 369 _ = self._format( 370 target=rich.console.Console( 371 width=width, 372 tab_size=tab_size, 373 soft_wrap=soft_wrap, 374 ), 375 width=width, 376 include_conda_list=include_conda_list, 377 )
387 def log( 388 self, 389 to: Union[Literal["display"], Path, Sequence[Union[Literal["display"], Path]]], 390 ) -> List[Path]: 391 """Convenience method to display the validation summary in the terminal and/or 392 save it to disk. See `save` for details.""" 393 if to == "display": 394 display = True 395 save_to = [] 396 elif isinstance(to, Path): 397 display = False 398 save_to = [to] 399 else: 400 display = "display" in to 401 save_to = [p for p in to if p != "display"] 402 403 if display: 404 self.display() 405 406 return self.save(save_to)
Convenience method to display the validation summary in the terminal and/or save it to disk. See save for details.
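A minimal usage sketch (assuming summary is a ValidationSummary instance, e.g. rd.validation_summary; the output directory is hypothetical):

    from pathlib import Path

    saved = summary.log(["display", Path("reports")])  # display and save to reports/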
408 def save( 409 self, path: Union[Path, Sequence[Path]] = Path("{id}_summary_{now}") 410 ) -> List[Path]: 411 """Save the validation/test summary in JSON, Markdown or HTML format. 412 413 Returns: 414 List of file paths the summary was saved to. 415 416 Notes: 417 - Format is chosen based on the suffix: `.json`, `.md`, `.html`. 418 - If **path** has no suffix it is assumed to be a direcotry to which a 419 `summary.json`, `summary.md` and `summary.html` are saved to. 420 """ 421 if isinstance(path, (str, Path)): 422 path = [Path(path)] 423 424 # folder to file paths 425 file_paths: List[Path] = [] 426 for p in path: 427 if p.suffix: 428 file_paths.append(p) 429 else: 430 file_paths.extend( 431 [ 432 p / "summary.json", 433 p / "summary.md", 434 p / "summary.html", 435 ] 436 ) 437 438 now = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ") 439 for p in file_paths: 440 p = Path(str(p).format(id=self.id or "bioimageio", now=now)) 441 if p.suffix == ".json": 442 self.save_json(p) 443 elif p.suffix == ".md": 444 self.save_markdown(p) 445 elif p.suffix == ".html": 446 self.save_html(p) 447 else: 448 raise ValueError(f"Unknown summary path suffix '{p.suffix}'") 449 450 return file_paths
Save the validation/test summary in JSON, Markdown or HTML format.
Returns:
List of file paths the summary was saved to.
Notes:
- Format is chosen based on the suffix: .json, .md, .html.
- If path has no suffix, it is assumed to be a directory to which summary.json, summary.md and summary.html are saved.
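A minimal usage sketch (using the default path pattern, which fills in the resource id and a UTC timestamp):

    from pathlib import Path

    paths = summary.save(Path("{id}_summary_{now}"))  # no suffix: writes summary.json/.md/.html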
452 def save_json( 453 self, path: Path = Path("summary.json"), *, indent: Optional[int] = 2 454 ): 455 """Save validation/test summary as JSON file.""" 456 json_str = self.model_dump_json(indent=indent) 457 path.parent.mkdir(exist_ok=True, parents=True) 458 _ = path.write_text(json_str, encoding="utf-8") 459 logger.info("Saved summary to {}", path.absolute())
Save validation/test summary as JSON file.
461 def save_markdown(self, path: Path = Path("summary.md")): 462 """Save rendered validation/test summary as Markdown file.""" 463 formatted = self.format_md() 464 path.parent.mkdir(exist_ok=True, parents=True) 465 _ = path.write_text(formatted, encoding="utf-8") 466 logger.info("Saved Markdown formatted summary to {}", path.absolute())
Save rendered validation/test summary as Markdown file.
468 def save_html(self, path: Path = Path("summary.html")) -> None: 469 """Save rendered validation/test summary as HTML file.""" 470 path.parent.mkdir(exist_ok=True, parents=True) 471 472 html = self.format_html() 473 _ = path.write_text(html, encoding="utf-8") 474 logger.info("Saved HTML formatted summary to {}", path.absolute())
Save rendered validation/test summary as HTML file.
476 @classmethod 477 def load_json(cls, path: Path) -> Self: 478 """Load validation/test summary from a suitable JSON file""" 479 json_str = Path(path).read_text(encoding="utf-8") 480 return cls.model_validate_json(json_str)
Load validation/test summary from a suitable JSON file
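A minimal round-trip sketch (the path is hypothetical):

    from pathlib import Path
    from bioimageio.spec import ValidationSummary

    restored = ValidationSummary.load_json(Path("reports/summary.json"))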