# src/bioimageio/spec/_hf_card.py
import collections.abc
import warnings
from functools import partial
from pathlib import PurePosixPath
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union

import numpy as np
from imageio.v3 import imwrite  # pyright: ignore[reportUnknownVariableType]
from loguru import logger
from numpy.typing import NDArray
from typing_extensions import assert_never

from bioimageio.spec._internal.validation_context import get_validation_context
from bioimageio.spec.model.v0_5 import (
    IntervalOrRatioDataDescr,
    KerasHdf5WeightsDescr,
    NominalOrOrdinalDataDescr,
    OnnxWeightsDescr,
    PytorchStateDictWeightsDescr,
    TensorflowJsWeightsDescr,
    TensorflowSavedModelBundleWeightsDescr,
    TensorId,
    TorchscriptWeightsDescr,
)

from ._internal.io import RelativeFilePath, get_reader
from ._internal.io_utils import load_array
from ._version import VERSION
from .model import ModelDescr
from .utils import get_spdx_licenses, load_image
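
# License identifiers known to the Hugging Face Hub (as of writing). A license
# outside this tuple is declared as "other" with an explicit
# license_name/license_link in the card metadata, or as "unknown"
# (see create_huggingface_model_card below).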
HF_KNOWN_LICENSES = (
    "apache-2.0",
    "mit",
    "openrail",
    "bigscience-openrail-m",
    "creativeml-openrail-m",
    "bigscience-bloom-rail-1.0",
    "bigcode-openrail-m",
    "afl-3.0",
    "artistic-2.0",
    "bsl-1.0",
    "bsd",
    "bsd-2-clause",
    "bsd-3-clause",
    "bsd-3-clause-clear",
    "c-uda",
    "cc",
    "cc0-1.0",
    "cc-by-2.0",
    "cc-by-2.5",
    "cc-by-3.0",
    "cc-by-4.0",
    "cc-by-sa-3.0",
    "cc-by-sa-4.0",
    "cc-by-nc-2.0",
    "cc-by-nc-3.0",
    "cc-by-nc-4.0",
    "cc-by-nd-4.0",
    "cc-by-nc-nd-3.0",
    "cc-by-nc-nd-4.0",
    "cc-by-nc-sa-2.0",
    "cc-by-nc-sa-3.0",
    "cc-by-nc-sa-4.0",
    "cdla-sharing-1.0",
    "cdla-permissive-1.0",
    "cdla-permissive-2.0",
    "wtfpl",
    "ecl-2.0",
    "epl-1.0",
    "epl-2.0",
    "etalab-2.0",
    "eupl-1.1",
    "eupl-1.2",
    "agpl-3.0",
    "gfdl",
    "gpl",
    "gpl-2.0",
    "gpl-3.0",
    "lgpl",
    "lgpl-2.1",
    "lgpl-3.0",
    "isc",
    "h-research",
    "intel-research",
    "lppl-1.3c",
    "ms-pl",
    "apple-ascl",
    "apple-amlr",
    "mpl-2.0",
    "odc-by",
    "odbl",
    "openmdw-1.0",
    "openrail++",
    "osl-3.0",
    "postgresql",
    "ofl-1.1",
    "ncsa",
    "unlicense",
    "zlib",
    "pddl",
    "lgpl-lr",
    "deepfloyd-if-license",
    "fair-noncommercial-research-license",
    "llama2",
    "llama3",
    "llama3.1",
    "llama3.2",
    "llama3.3",
    "llama4",
    "grok2-community",
    "gemma",
)


def _generate_png_from_tensor(tensor: NDArray[np.generic]) -> Optional[bytes]:
    """Generate PNG bytes from a sample tensor.

    Prefers 2D slices from multi-dimensional arrays.
    Returns PNG bytes or None if generation fails.
    """
    try:
        # Squeeze out singleton dimensions
        arr = np.squeeze(tensor)

        # Handle different dimensionalities
        if arr.ndim == 2:
            img_data = arr
        elif arr.ndim == 3:
            # Could be (H, W, C) or (Z, H, W)
            if arr.shape[-1] in [1, 3, 4]:  # Likely channels last
                img_data = arr
            else:  # Take middle slice
                img_data = arr[arr.shape[0] // 2]
        elif arr.ndim == 4:
            # Take middle slices (e.g., batch, z, y, x)
            img_data = (
                arr[0, arr.shape[1] // 2]
                if arr.shape[0] == 1
                else arr[arr.shape[0] // 2, arr.shape[1] // 2]
            )
        elif arr.ndim > 4:
            # Take middle slices of all extra dimensions
            slices = tuple(s // 2 for s in arr.shape[:-2])
            img_data = arr[slices]
        else:
            return None

        # Normalize to 0-255 uint8
        img_data = np.squeeze(img_data)
        if img_data.dtype != np.uint8:
            img_min, img_max = img_data.min(), img_data.max()
            if img_max > img_min:
                img_data: NDArray[Any] = (img_data - img_min) / (img_max - img_min)
            else:
                img_data = np.zeros_like(img_data)
            img_data = (img_data * 255).astype(np.uint8)
        return imwrite("<bytes>", img_data, extension=".png")
    except Exception:
        return None
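
# Illustrative behavior (a sketch, not a doctest; the shape is made up):
# a (1, 32, 256, 256) array is squeezed to (32, 256, 256), the middle slice
# along the first axis is taken, min-max scaled to uint8, and PNG-encoded,
# so the result starts with the PNG signature:
#
#   arr = np.random.rand(1, 32, 256, 256)
#   png = _generate_png_from_tensor(arr)
#   assert png is not None and png[:4] == b"\x89PNG"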


def _get_io_description(
    model: ModelDescr,
) -> Tuple[str, Dict[str, bytes], List[TensorId], List[TensorId]]:
    """Generate a description of model inputs and outputs with sample images.

    Returns:
        A tuple of (markdown_string, referenced_files_dict, input_ids, output_ids)
        where referenced_files_dict maps filenames to file bytes.
    """
    markdown_string = ""
    referenced_files: dict[str, bytes] = {}
    input_ids: List[TensorId] = []
    output_ids: List[TensorId] = []

    def format_data_descr(
        d: Union[
            NominalOrOrdinalDataDescr,
            IntervalOrRatioDataDescr,
            Sequence[Union[NominalOrOrdinalDataDescr, IntervalOrRatioDataDescr]],
        ],
    ) -> str:
        ret = ""
        if isinstance(d, NominalOrOrdinalDataDescr):
            ret += f"  - Values: {d.values}\n"
        elif isinstance(d, IntervalOrRatioDataDescr):
            ret += f"  - Value unit: {d.unit}\n"
            ret += f"  - Value scale factor: {d.scale}\n"
            if d.offset is not None:
                ret += f"  - Value offset: {d.offset}\n"
            elif d.range[0] is not None:
                ret += f"  - Value minimum: {d.range[0]}\n"
            elif d.range[1] is not None:
                ret += f"  - Value maximum: {d.range[1]}\n"
        elif isinstance(d, collections.abc.Sequence):
            for dd in d:
                ret += format_data_descr(dd)
        else:
            assert_never(d)

        return ret

    # Input descriptions
    if model.inputs:
        markdown_string += "\n- **Input specifications:**\n"

        for inp in model.inputs:
            input_ids.append(inp.id)
            axes_str = ", ".join(str(a.id) for a in inp.axes)
            shape_str = " × ".join(str(a.size) for a in inp.axes)

            markdown_string += f"  `{inp.id}`: {inp.description or ''}\n\n"
            markdown_string += f"  - Axes: `{axes_str}`\n"
            markdown_string += f"  - Shape: `{shape_str}`\n"
            markdown_string += f"  - Data type: `{inp.dtype}`\n"
            markdown_string += format_data_descr(inp.data)

            # Try to load and display sample_tensor (preferred) or test_tensor
            img_bytes = None
            if inp.sample_tensor is not None:
                try:
                    arr = load_image(inp.sample_tensor)
                    img_bytes = _generate_png_from_tensor(arr)
                except Exception as e:
                    logger.error("failed to generate input sample image: {}", e)

            if img_bytes is None and inp.test_tensor is not None:
                try:
                    arr = load_array(inp.test_tensor)
                    img_bytes = _generate_png_from_tensor(arr)
                except Exception as e:
                    logger.error(
                        "failed to generate input sample image from test data: {}", e
                    )

            if img_bytes:
                filename = f"images/input_{inp.id}_sample.png"
                referenced_files[filename] = img_bytes
                markdown_string += (
                    f"  - example\n    ![input {inp.id} sample]({filename})\n"
                )

    # Output descriptions
    if model.outputs:
        markdown_string += "\n- **Output specifications:**\n"
        for out in model.outputs:
            output_ids.append(out.id)
            axes_str = ", ".join(str(a.id) for a in out.axes)
            shape_str = " × ".join(str(a.size) for a in out.axes)

            markdown_string += f"  `{out.id}`: {out.description or ''}\n"
            markdown_string += f"  - Axes: `{axes_str}`\n"
            markdown_string += f"  - Shape: `{shape_str}`\n"
            markdown_string += f"  - Data type: `{out.dtype}`\n"
            markdown_string += format_data_descr(out.data)

            # Try to load and display sample_tensor (preferred) or test_tensor
            img_bytes = None
            if out.sample_tensor is not None:
                try:
                    arr = load_image(out.sample_tensor)
                    img_bytes = _generate_png_from_tensor(arr)
                except Exception as e:
                    logger.error("failed to generate output sample image: {}", e)

            if img_bytes is None and out.test_tensor is not None:
                try:
                    arr = load_array(out.test_tensor)
                    img_bytes = _generate_png_from_tensor(arr)
                except Exception as e:
                    logger.error(
                        "failed to generate output sample image from test data: {}", e
                    )

            if img_bytes:
                filename = f"images/output_{out.id}_sample.png"
                referenced_files[filename] = img_bytes
                markdown_string += (
                    f"  - example\n    ![output {out.id} sample]({filename})\n"
                )

    return markdown_string, referenced_files, input_ids, output_ids
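
# The returned markdown fragment looks roughly like this (values illustrative):
#
#   - **Input specifications:**
#     `input0`: Raw microscopy image
#
#     - Axes: `batch, y, x`
#     - Shape: `1 × 512 × 512`
#     - Data type: `float32`
#     - example
#       ![input input0 sample](images/input_input0_sample.png)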


def create_huggingface_model_card(
    model: ModelDescr, *, repo_id: str
) -> Tuple[str, Dict[str, bytes]]:
    """Create a Hugging Face model card for a BioImage.IO model.

    Returns:
        A tuple of (markdown_string, images_dict) where images_dict maps
        filenames to image bytes (sample PNGs and the cover image) that should
        be saved alongside the markdown.
    """
    if model.version is None:
        model_version = ""
    else:
        model_version = f"\n- **model version:** {model.version}"

    if model.documentation is None:
        additional_model_doc = ""
    else:
        doc_reader = get_reader(model.documentation)
        local_doc_path = f"package/{doc_reader.original_file_name}"
        model = model.model_copy()
        with get_validation_context().replace(perform_io_checks=False):
            model.documentation = RelativeFilePath(PurePosixPath(local_doc_path))

        additional_model_doc = f"\n- **Additional model documentation:** [{local_doc_path}]({local_doc_path})"

    if model.cite:
        developed_by = "\n- **Developed by:** " + (
            "".join(
                (
                    f"\n  - {c.text}: "
                    + (f"https://www.doi.org/{c.doi}" if c.doi else str(c.url))
                )
                for c in model.cite
            )
        )
    else:
        developed_by = ""

    if model.config.bioimageio.funded_by:
        funded_by = f"\n- **Funded by:** {model.config.bioimageio.funded_by}"
    else:
        funded_by = ""

    if model.authors:
        shared_by = "\n- **Shared by:** " + (
            "".join(
                (
                    f"\n  - {a.name}"
                    + (f", {a.affiliation}" if a.affiliation else "")
                    + (
                        f", [https://orcid.org/{a.orcid}](https://orcid.org/{a.orcid})"
                        if a.orcid
                        else ""
                    )
                    + (
                        f", [https://github.com/{a.github_user}](https://github.com/{a.github_user})"
                        if a.github_user
                        else ""
                    )
                    for a in model.authors
                )
            )
        )
    else:
        shared_by = ""

    if model.config.bioimageio.architecture_type:
        model_type = f"\n- **Model type:** {model.config.bioimageio.architecture_type}"
    else:
        model_type = ""

    if model.config.bioimageio.modality:
        model_modality = f"\n- **Modality:** {model.config.bioimageio.modality}"
    else:
        model_modality = ""

    if model.config.bioimageio.target_structure:
        target_structures = "\n- **Target structures:** " + ", ".join(
            model.config.bioimageio.target_structure
        )
    else:
        target_structures = ""

    if model.config.bioimageio.task:
        task_type = f"\n- **Task type:** {model.config.bioimageio.task}"
    else:
        task_type = ""

    if model.parent:
        finetuned_from = f"\n- **Finetuned from model:** {model.parent.id}"
    else:
        finetuned_from = ""

    repository = (
        f"[{model.git_repo}]({model.git_repo})" if model.git_repo else "missing"
    )

    dl_framework_parts: List[str] = []
    training_frameworks: List[str] = []
    model_size: Optional[str] = None
    for weights in model.weights.available_formats.values():
        if isinstance(weights, (PytorchStateDictWeightsDescr, TorchscriptWeightsDescr)):
            dl_framework_version = weights.pytorch_version
        elif isinstance(
            weights,
            (
                TensorflowSavedModelBundleWeightsDescr,
                TensorflowJsWeightsDescr,
                KerasHdf5WeightsDescr,
            ),
        ):
            dl_framework_version = weights.tensorflow_version
        elif isinstance(weights, OnnxWeightsDescr):
            dl_framework_version = f"opset version: {weights.opset_version}"
        else:
            assert_never(weights)

        if weights.parent is None:
            training_frameworks.append(weights.weights_format_name)

        dl_framework_parts.append(
            f"\n  - {weights.weights_format_name}: {dl_framework_version}"
        )

        if model_size is None:
            s = 0
            r = weights.get_reader()
            for chunk in iter(partial(r.read, 128 * 1024), b""):
                s += len(chunk)

            if model.config.bioimageio.model_parameter_count is not None:
                if model.config.bioimageio.model_parameter_count < 1e9:
                    model_size = f"{model.config.bioimageio.model_parameter_count / 1e6:.2f} million parameters, "
                else:
                    model_size = f"{model.config.bioimageio.model_parameter_count / 1e9:.2f} billion parameters, "
            else:
                model_size = ""

            if s < 1e9:
                model_size += f"{s / 1e6:.2f} MB"
            else:
                model_size += f"{s / 1e9:.2f} GB"

    dl_frameworks = "".join(dl_framework_parts)
    if len(training_frameworks) > 1:
        warnings.warn(
            "Multiple training frameworks detected. (Some weight formats are probably missing a `parent` reference.)"
        )
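
    # Note: sizes use decimal units; e.g. a weights file of s = 3_500_000 bytes
    # renders as "3.50 MB", and model_parameter_count = 1.2e9 renders as
    # "1.20 billion parameters, ".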

    if (
        model.weights.pytorch_state_dict is not None
        and model.weights.pytorch_state_dict.dependencies is not None
    ):
        env_reader = model.weights.pytorch_state_dict.dependencies.get_reader()
        dependencies = f"Dependencies for PyTorch state dict weights are listed in [{env_reader.original_file_name}](package/{env_reader.original_file_name})."
    else:
        dependencies = "None beyond the respective framework library."

    out_of_scope_use = (
        model.config.bioimageio.out_of_scope_use
        if model.config.bioimageio.out_of_scope_use
        else """missing; therefore these typical limitations should be considered:

- *Likely not suitable for diagnostic purposes.*
- *Likely not validated for imaging modalities other than those present in the training data.*
- *Should not be used without proper validation on the user's specific datasets.*

"""
    )

    environmental_impact = model.config.bioimageio.environmental_impact.format_md()
    if environmental_impact:
        environmental_impact_toc_entry = (
            "\n- [Environmental Impact](#environmental-impact)"
        )
    else:
        environmental_impact_toc_entry = ""

    evaluation_parts: List[str] = []
    n_evals = 0
    for e in model.config.bioimageio.evaluations:
        if e.dataset_role == "independent":
            continue  # treated separately below

        n_evals += 1
        n_evals_str = "" if n_evals == 1 else f" {n_evals}"
        evaluation_parts.append(f"\n# Evaluation{n_evals_str}\n")
        evaluation_parts.append(e.format_md())

    n_evals = 0
    for e in model.config.bioimageio.evaluations:
        if e.dataset_role != "independent":
            continue  # treated separately above

        n_evals += 1
        n_evals_str = "" if n_evals == 1 else f" {n_evals}"

        evaluation_parts.append(f"### Validation on External Data{n_evals_str}\n")
        evaluation_parts.append(e.format_md())

    if evaluation_parts:
        evaluation = "\n".join(evaluation_parts)
        evaluation_toc_entry = "\n- [Evaluation](#evaluation)"
    else:
        evaluation = ""
        evaluation_toc_entry = ""

    training_details = ""
    if model.config.bioimageio.training.training_preprocessing:
        training_details += f"### Preprocessing\n\n{model.config.bioimageio.training.training_preprocessing}\n\n"

    training_details += "### Training Hyperparameters\n\n"
    training_details += f"- **Framework:** {' / '.join(training_frameworks)}\n"
    if model.config.bioimageio.training.training_epochs is not None:
        training_details += (
            f"- **Epochs:** {model.config.bioimageio.training.training_epochs}\n"
        )

    if model.config.bioimageio.training.training_batch_size is not None:
        training_details += f"- **Batch size:** {model.config.bioimageio.training.training_batch_size}\n"

    if model.config.bioimageio.training.initial_learning_rate is not None:
        training_details += f"- **Initial learning rate:** {model.config.bioimageio.training.initial_learning_rate}\n"

    if model.config.bioimageio.training.learning_rate_schedule is not None:
        training_details += f"- **Learning rate schedule:** {model.config.bioimageio.training.learning_rate_schedule}\n"

    if model.config.bioimageio.training.loss_function is not None:
        training_details += (
            f"- **Loss function:** {model.config.bioimageio.training.loss_function}"
        )
        if model.config.bioimageio.training.loss_function_kwargs:
            training_details += (
                f" with {model.config.bioimageio.training.loss_function_kwargs}"
            )

        training_details += "\n"

    if model.config.bioimageio.training.optimizer is not None:
        training_details += (
            f"- **Optimizer:** {model.config.bioimageio.training.optimizer}"
        )
        if model.config.bioimageio.training.optimizer_kwargs:
            training_details += (
                f" with {model.config.bioimageio.training.optimizer_kwargs}"
            )

        training_details += "\n"

    if model.config.bioimageio.training.regularization is not None:
        training_details += (
            f"- **Regularization:** {model.config.bioimageio.training.regularization}\n"
        )
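
    # With e.g. training_epochs=100 and training_batch_size=4 this renders as:
    #   - **Framework:** <weights format name(s)>
    #   - **Epochs:** 100
    #   - **Batch size:** 4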

    speeds_sizes_times = "### Speeds, Sizes, Times\n\n"
    if model.config.bioimageio.training.training_duration is not None:
        speeds_sizes_times += f"- **Training time:** {model.config.bioimageio.training.training_duration:.2f}\n"

    speeds_sizes_times += f"- **Model size:** {model_size}\n"
    if model.config.bioimageio.inference_time:
        speeds_sizes_times += (
            f"- **Inference time:** {model.config.bioimageio.inference_time}\n"
        )

    if model.config.bioimageio.memory_requirements_inference:
        speeds_sizes_times += f"- **Memory requirements:** {model.config.bioimageio.memory_requirements_inference}\n"

    model_arch_and_objective = "## Model Architecture and Objective\n\n"
    if (
        model.config.bioimageio.architecture_type
        or model.config.bioimageio.architecture_description
    ):
        model_arch_and_objective += (
            f"- **Architecture:** {model.config.bioimageio.architecture_type or ''}"
            + (
                " --- "
                if model.config.bioimageio.architecture_type
                and model.config.bioimageio.architecture_description
                else ""
            )
            + (
                model.config.bioimageio.architecture_description
                if model.config.bioimageio.architecture_description is not None
                else ""
            )
            + "\n"
        )

    io_desc, referenced_files, input_ids, output_ids = _get_io_description(model)
    predict_snippet_inputs = str(
        {input_id: "<path or tensor>" for input_id in input_ids}
    )
    model_arch_and_objective += io_desc

    hardware_requirements = "\n### Hardware Requirements\n"
    if model.config.bioimageio.memory_requirements_training is not None:
        hardware_requirements += f"- **Training:** GPU memory: {model.config.bioimageio.memory_requirements_training}\n"

    if model.config.bioimageio.memory_requirements_inference is not None:
        hardware_requirements += f"- **Inference:** GPU memory: {model.config.bioimageio.memory_requirements_inference}\n"

    hardware_requirements += f"- **Storage:** Model size: {model_size}\n"

    if model.license is None:
        license = "unknown"
        license_meta = "unknown"
    else:
        spdx_licenses = get_spdx_licenses()
        matches = [
            (entry["name"], entry["reference"])
            for entry in spdx_licenses["licenses"]
            if entry["licenseId"].lower() == model.license.lower()
        ]
        if matches:
            if len(matches) > 1:
                logger.warning(
                    "Multiple SPDX license matches found for '{}', using the first one.",
                    model.license,
                )
            name, reference = matches[0]
            license = f"[{name}]({reference})"
            if model.license.lower() in HF_KNOWN_LICENSES:
                license_meta = model.license.lower()
            else:
                license_meta = f"other\nlicense_name: {model.license.lower()}\nlicense_link: {reference}"
        else:
            if model.license.lower() in HF_KNOWN_LICENSES:
                license_meta = model.license.lower()
            else:
                license_meta = "unknown"

            license = model.license.lower()
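
    # For example (illustrative): license "CC-BY-4.0" is an SPDX id the Hub
    # also knows, so license_meta becomes "cc-by-4.0"; an SPDX id the Hub does
    # not know becomes
    #   other
    #   license_name: <spdx id>
    #   license_link: <SPDX reference URL>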

    base_model = (
        f"\nbase_model: {model.parent.id[len('huggingface/') :]}"
        if model.parent is not None and model.parent.id.startswith("huggingface/")
        else ""
    )
    dataset_meta = (
        f"\ndataset: {model.training_data.id[len('huggingface/') :]}"
        if model.training_data is not None
        and model.training_data.id is not None
        and model.training_data.id.startswith("huggingface/")
        else ""
    )
    if model.covers:
        cover_image_reader = get_reader(model.covers[0])
        cover_image_bytes = cover_image_reader.read()
        cover_image_filename = f"images/{cover_image_reader.original_file_name}"
        referenced_files[cover_image_filename] = cover_image_bytes
        cover_image_md = f"\n\n![cover image]({cover_image_filename})\n"
        thumbnail_meta = (
            f"\nthumbnail: {cover_image_filename}"  # TODO: fix this to be a proper URL
        )
    else:
        cover_image_md = ""
        thumbnail_meta = ""

    # TODO: add pipeline_tag to metadata
    readme = f"""---
license: {license_meta}{thumbnail_meta}
tags: {list({"biology"}.union(set(model.tags)))}
language: [en]
library_name: bioimageio{base_model}{dataset_meta}
---
# {model.name}{cover_image_md}

{model.description or ""}


# Table of Contents

- [Model Details](#model-details)
- [Uses](#uses)
- [Bias, Risks, and Limitations](#bias-risks-and-limitations)
- [How to Get Started with the Model](#how-to-get-started-with-the-model)
- [Training Details](#training-details){evaluation_toc_entry}{
    environmental_impact_toc_entry
}
- [Technical Specifications](#technical-specifications)


# Model Details

## Model Description
{model_version}{additional_model_doc}{developed_by}{funded_by}{shared_by}{model_type}{
    model_modality
}{target_structures}{task_type}
- **License:** {license}{finetuned_from}

## Model Sources

- **Repository:** {repository}
- **Paper:** see [**Developed by**](#model-description)

# Uses

## Direct Use

This model is compatible with the bioimageio.spec Python package (version >= {
    VERSION
}) and with the bioimageio.core Python package, which supports model inference in Python code or via the `bioimageio` CLI.

```python
from bioimageio.core import predict

output_sample = predict(
    "huggingface/{repo_id}/{model.version or "draft"}",
    inputs={predict_snippet_inputs},
)

output_tensor = output_sample.members["{
    output_ids[0] if output_ids else "<output_id>"
}"]
xarray_dataarray = output_tensor.data
numpy_ndarray = output_tensor.data.to_numpy()
```

## Downstream Use

Specific bioimage.io partner tool compatibilities may be reported at [Compatibility Reports](https://bioimage-io.github.io/collection/latest/compatibility/#compatibility-by-resource).
{
    "Training (and fine-tuning) code may be available at " + model.git_repo + "."
    if model.git_repo
    else ""
}

## Out-of-Scope Use

{out_of_scope_use}


{model.config.bioimageio.bias_risks_limitations.format_md()}

# How to Get Started with the Model

You can use "huggingface/{repo_id}/{
    model.version or "draft"
}" as the resource identifier to load this model directly from the Hugging Face Hub using bioimageio.spec or bioimageio.core.

See [bioimageio.core documentation: Get started](https://bioimage-io.github.io/core-bioimage-io-python/latest/get-started) for instructions on how to load and run this model using the `bioimageio.core` Python package or the bioimageio CLI.

# Training Details

## Training Data

{
    "This model was trained on `" + str(model.training_data.id) + "`."
    if model.training_data is not None
    else "missing"
}

## Training Procedure

{training_details}

{speeds_sizes_times}
{evaluation}
{environmental_impact}

# Technical Specifications

{model_arch_and_objective}

## Compute Infrastructure

{hardware_requirements}

### Software

- **Framework:** {dl_frameworks}
- **Libraries:** {dependencies}
- **BioImage.IO partner compatibility:** [Compatibility Reports](https://bioimage-io.github.io/collection/latest/compatibility/#compatibility-by-resource)

---

*This model card was created using the template of the bioimageio.spec Python package, which in turn is based on the BioImage Model Zoo template, incorporating best practices from the Hugging Face Model Card Template. For more information on contributing models, visit [bioimage.io](https://bioimage.io).*

---

**References:**

- [Hugging Face Model Card Template](https://huggingface.co/docs/hub/en/model-card-annotated)
- [Hugging Face modelcard_template.md](https://github.com/huggingface/huggingface_hub/blob/b9decfdf9b9a162012bc52f260fd64fc37db660e/src/huggingface_hub/templates/modelcard_template.md)
- [BioImage Model Zoo Documentation](https://bioimage.io/docs/)
- [Model Cards for Model Reporting](https://arxiv.org/abs/1810.03993)
- [bioimageio.spec Python Package](https://bioimage-io.github.io/spec-bioimage-io)
"""

    return readme, referenced_files
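

# Example usage (a sketch; the model id, repo_id, and paths are illustrative):
#
#   from pathlib import Path
#   from bioimageio.spec import load_model_description
#
#   model = load_model_description("affable-shark")
#   assert isinstance(model, ModelDescr)
#   readme, files = create_huggingface_model_card(model, repo_id="user/affable-shark")
#   _ = Path("README.md").write_text(readme)
#   for name, data in files.items():
#       path = Path(name)
#       path.parent.mkdir(parents=True, exist_ok=True)
#       _ = path.write_bytes(data)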