# src/bioimageio/spec/_hf_card.py
import collections.abc
import warnings
from functools import partial
from pathlib import PurePosixPath
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union

import numpy as np
from imageio.v3 import imwrite  # pyright: ignore[reportUnknownVariableType]
from loguru import logger
from numpy.typing import NDArray
from typing_extensions import assert_never

from bioimageio.spec._internal.validation_context import get_validation_context
from bioimageio.spec.model.v0_5 import (
    IntervalOrRatioDataDescr,
    KerasHdf5WeightsDescr,
    KerasV3WeightsDescr,
    NominalOrOrdinalDataDescr,
    OnnxWeightsDescr,
    PytorchStateDictWeightsDescr,
    TensorflowJsWeightsDescr,
    TensorflowSavedModelBundleWeightsDescr,
    TensorId,
    TorchscriptWeightsDescr,
)

from ._internal.io import RelativeFilePath, get_reader
from ._internal.io_utils import load_array
from ._version import VERSION
from .model import ModelDescr
from .utils import get_spdx_licenses, load_image
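
# License identifiers natively recognized by Hugging Face Hub metadata; licenses
# outside this list are mapped to "other" (with name and link) or "unknown" below.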
HF_KNOWN_LICENSES = (
    "apache-2.0",
    "mit",
    "openrail",
    "bigscience-openrail-m",
    "creativeml-openrail-m",
    "bigscience-bloom-rail-1.0",
    "bigcode-openrail-m",
    "afl-3.0",
    "artistic-2.0",
    "bsl-1.0",
    "bsd",
    "bsd-2-clause",
    "bsd-3-clause",
    "bsd-3-clause-clear",
    "c-uda",
    "cc",
    "cc0-1.0",
    "cc-by-2.0",
    "cc-by-2.5",
    "cc-by-3.0",
    "cc-by-4.0",
    "cc-by-sa-3.0",
    "cc-by-sa-4.0",
    "cc-by-nc-2.0",
    "cc-by-nc-3.0",
    "cc-by-nc-4.0",
    "cc-by-nd-4.0",
    "cc-by-nc-nd-3.0",
    "cc-by-nc-nd-4.0",
    "cc-by-nc-sa-2.0",
    "cc-by-nc-sa-3.0",
    "cc-by-nc-sa-4.0",
    "cdla-sharing-1.0",
    "cdla-permissive-1.0",
    "cdla-permissive-2.0",
    "wtfpl",
    "ecl-2.0",
    "epl-1.0",
    "epl-2.0",
    "etalab-2.0",
    "eupl-1.1",
    "eupl-1.2",
    "agpl-3.0",
    "gfdl",
    "gpl",
    "gpl-2.0",
    "gpl-3.0",
    "lgpl",
    "lgpl-2.1",
    "lgpl-3.0",
    "isc",
    "h-research",
    "intel-research",
    "lppl-1.3c",
    "ms-pl",
    "apple-ascl",
    "apple-amlr",
    "mpl-2.0",
    "odc-by",
    "odbl",
    "openmdw-1.0",
    "openrail++",
    "osl-3.0",
    "postgresql",
    "ofl-1.1",
    "ncsa",
    "unlicense",
    "zlib",
    "pddl",
    "lgpl-lr",
    "deepfloyd-if-license",
    "fair-noncommercial-research-license",
    "llama2",
    "llama3",
    "llama3.1",
    "llama3.2",
    "llama3.3",
    "llama4",
    "grok2-community",
    "gemma",
)


def _generate_png_from_tensor(tensor: NDArray[np.generic]) -> Optional[bytes]:
    """Generate PNG bytes from a sample tensor.

    Prefers 2D slices from multi-dimensional arrays.
    Returns PNG bytes or None if generation fails.
    """
    try:
        # Squeeze out singleton dimensions
        arr = np.squeeze(tensor)

        # Handle different dimensionalities
        if arr.ndim == 2:
            img_data = arr
        elif arr.ndim == 3:
            # Could be (H, W, C) or (Z, H, W)
            if arr.shape[-1] in [1, 3, 4]:  # Likely channels last
                img_data = arr
            else:  # Take middle slice
                img_data = arr[arr.shape[0] // 2]
        elif arr.ndim == 4:
            # Take middle slices (e.g., batch, z, y, x)
            img_data = (
                arr[0, arr.shape[1] // 2]
                if arr.shape[0] == 1
                else arr[arr.shape[0] // 2, arr.shape[1] // 2]
            )
        elif arr.ndim > 4:
            # Take middle slices of all extra dimensions
            slices = tuple(s // 2 for s in arr.shape[:-2])
            img_data = arr[slices]
        else:
            return None

        # Normalize to 0-255 uint8
        img_data = np.squeeze(img_data)
        if img_data.dtype != np.uint8:
            img_min, img_max = img_data.min(), img_data.max()
            if img_max > img_min:
                img_data: NDArray[Any] = (img_data - img_min) / (img_max - img_min)
            else:
                img_data = np.zeros_like(img_data)
            img_data = (img_data * 255).astype(np.uint8)
        return imwrite("<bytes>", img_data, extension=".png")
    except Exception:
        return None
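

# Minimal sketch of the slicing behavior (hypothetical shapes): a (1, 2, 3, 64, 64)
# float array is squeezed to (2, 3, 64, 64), reduced to its middle (64, 64) slice,
# min-max normalized to uint8, and PNG-encoded:
#
#     arr = np.random.rand(1, 2, 3, 64, 64).astype(np.float32)
#     png_bytes = _generate_png_from_tensor(arr)  # PNG bytes, or None on failure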


def _get_io_description(
    model: ModelDescr,
) -> Tuple[str, Dict[str, bytes], List[TensorId], List[TensorId]]:
    """Generate a description of model inputs and outputs with sample images.

    Returns:
        A tuple of (markdown_string, referenced_files_dict, input_ids, output_ids)
        where referenced_files_dict maps filenames to file bytes.
    """
    markdown_string = ""
    referenced_files: dict[str, bytes] = {}
    input_ids: List[TensorId] = []
    output_ids: List[TensorId] = []

    def format_data_descr(
        d: Union[
            NominalOrOrdinalDataDescr,
            IntervalOrRatioDataDescr,
            Sequence[Union[NominalOrOrdinalDataDescr, IntervalOrRatioDataDescr]],
        ],
    ) -> str:
        ret = ""
        if isinstance(d, NominalOrOrdinalDataDescr):
            ret += f" - Values: {d.values}\n"
        elif isinstance(d, IntervalOrRatioDataDescr):
            ret += f" - Value unit: {d.unit}\n"
            ret += f" - Value scale factor: {d.scale}\n"
            if d.offset is not None:
                ret += f" - Value offset: {d.offset}\n"
            elif d.range[0] is not None:
                ret += f" - Value minimum: {d.range[0]}\n"
            elif d.range[1] is not None:
                ret += f" - Value maximum: {d.range[1]}\n"
        elif isinstance(d, collections.abc.Sequence):
            for dd in d:
                ret += format_data_descr(dd)
        else:
            assert_never(d)

        return ret
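
    # For illustration (hypothetical values): an IntervalOrRatioDataDescr with
    # unit "arbitrary unit" and scale 1.0 renders as
    #  - Value unit: arbitrary unit
    #  - Value scale factor: 1.0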

    # Input descriptions
    if model.inputs:
        markdown_string += "\n- **Input specifications:**\n"

        for inp in model.inputs:
            input_ids.append(inp.id)
            axes_str = ", ".join(str(a.id) for a in inp.axes)
            shape_str = " × ".join(
                str(a.size) if isinstance(a.size, int) else str(a.size)
                for a in inp.axes
            )

            markdown_string += f" `{inp.id}`: {inp.description or ''}\n\n"
            markdown_string += f" - Axes: `{axes_str}`\n"
            markdown_string += f" - Shape: `{shape_str}`\n"
            markdown_string += f" - Data type: `{inp.dtype}`\n"
            markdown_string += format_data_descr(inp.data)

            # Try to load and display sample_tensor (preferred) or test_tensor
            img_bytes = None
            if inp.sample_tensor is not None:
                try:
                    arr = load_image(inp.sample_tensor)
                    img_bytes = _generate_png_from_tensor(arr)
                except Exception as e:
                    logger.error("failed to generate input sample image: {}", e)

            if img_bytes is None and inp.test_tensor is not None:
                try:
                    arr = load_array(inp.test_tensor)
                    img_bytes = _generate_png_from_tensor(arr)
                except Exception as e:
                    logger.error(
                        "failed to generate input sample image from test data: {}", e
                    )

            if img_bytes:
                filename = f"images/input_{inp.id}_sample.png"
                referenced_files[filename] = img_bytes
                markdown_string += f" - example\n ![input {inp.id} sample]({filename})\n"

    # Output descriptions
    if model.outputs:
        markdown_string += "\n- **Output specifications:**\n"
        for out in model.outputs:
            output_ids.append(out.id)
            axes_str = ", ".join(str(a.id) for a in out.axes)
            shape_str = " × ".join(
                str(a.size) if isinstance(a.size, int) else str(a.size)
                for a in out.axes
            )

            markdown_string += f" `{out.id}`: {out.description or ''}\n"
            markdown_string += f" - Axes: `{axes_str}`\n"
            markdown_string += f" - Shape: `{shape_str}`\n"
            markdown_string += f" - Data type: `{out.dtype}`\n"
            markdown_string += format_data_descr(out.data)

            # Try to load and display sample_tensor (preferred) or test_tensor
            img_bytes = None
            if out.sample_tensor is not None:
                try:
                    arr = load_image(out.sample_tensor)
                    img_bytes = _generate_png_from_tensor(arr)
                except Exception as e:
                    logger.error("failed to generate output sample image: {}", e)

            if img_bytes is None and out.test_tensor is not None:
                try:
                    arr = load_array(out.test_tensor)
                    img_bytes = _generate_png_from_tensor(arr)
                except Exception as e:
                    logger.error(
                        "failed to generate output sample image from test data: {}", e
                    )

            if img_bytes:
                filename = f"images/output_{out.id}_sample.png"
                referenced_files[filename] = img_bytes
                markdown_string += f" - example\n ![output {out.id} sample]({filename})\n"

    return markdown_string, referenced_files, input_ids, output_ids
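
# For illustration (hypothetical ids): a model with one input "raw" and one output
# "prob" yields markdown beginning "\n- **Input specifications:**\n", a file dict
# like {"images/input_raw_sample.png": b"\x89PNG..."}, and the id lists
# ([TensorId("raw")], [TensorId("prob")]).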


def create_huggingface_model_card(
    model: ModelDescr, *, repo_id: str
) -> Tuple[str, Dict[str, bytes]]:
    """Create a Hugging Face model card for a BioImage.IO model.

    Returns:
        A tuple of (markdown_string, images_dict) where images_dict maps
        filenames to PNG bytes that should be saved alongside the markdown.
    """
    if model.version is None:
        model_version = ""
    else:
        model_version = f"\n- **model version:** {model.version}"

    if model.documentation is None:
        additional_model_doc = ""
    else:
        doc_reader = get_reader(model.documentation)
        local_doc_path = f"package/{doc_reader.original_file_name}"
        model = model.model_copy()
        with get_validation_context().replace(perform_io_checks=False):
            model.documentation = RelativeFilePath(PurePosixPath(local_doc_path))

        additional_model_doc = f"\n- **Additional model documentation:** [{local_doc_path}]({local_doc_path})"

    if model.cite:
        developed_by = "\n- **Developed by:** " + (
            "".join(
                (
                    f"\n - {c.text}: "
                    + (f"https://www.doi.org/{c.doi}" if c.doi else str(c.url))
                )
                for c in model.cite
            )
        )
    else:
        developed_by = ""

    if model.config.bioimageio.funded_by:
        funded_by = f"\n- **Funded by:** {model.config.bioimageio.funded_by}"
    else:
        funded_by = ""

    if model.authors:
        shared_by = "\n- **Shared by:** " + (
            "".join(
                (
                    f"\n - {a.name}"
                    + (f", {a.affiliation}" if a.affiliation else "")
                    + (
                        f", [https://orcid.org/{a.orcid}](https://orcid.org/{a.orcid})"
                        if a.orcid
                        else ""
                    )
                    + (
                        f", [https://github.com/{a.github_user}](https://github.com/{a.github_user})"
                        if a.github_user
                        else ""
                    )
                    for a in model.authors
                )
            )
        )
    else:
        shared_by = ""

    if model.config.bioimageio.architecture_type:
        model_type = f"\n- **Model type:** {model.config.bioimageio.architecture_type}"
    else:
        model_type = ""

    if model.config.bioimageio.modality:
        model_modality = f"\n- **Modality:** {model.config.bioimageio.modality}"
    else:
        model_modality = ""

    if model.config.bioimageio.target_structure:
        target_structures = "\n- **Target structures:** " + ", ".join(
            model.config.bioimageio.target_structure
        )
    else:
        target_structures = ""

    if model.config.bioimageio.task:
        task_type = f"\n- **Task type:** {model.config.bioimageio.task}"
    else:
        task_type = ""

    if model.parent:
        finetuned_from = f"\n- **Finetuned from model:** {model.parent.id}"
    else:
        finetuned_from = ""

    repository = (
        f"[{model.git_repo}]({model.git_repo})" if model.git_repo else "missing"
    )

    dl_framework_parts: List[str] = []
    training_frameworks: List[str] = []
    model_size: Optional[str] = None
    for weights in model.weights.available_formats.values():
        if isinstance(weights, (PytorchStateDictWeightsDescr, TorchscriptWeightsDescr)):
            dl_framework_version = weights.pytorch_version
        elif isinstance(
            weights,
            (
                TensorflowSavedModelBundleWeightsDescr,
                TensorflowJsWeightsDescr,
                KerasHdf5WeightsDescr,
            ),
        ):
            dl_framework_version = weights.tensorflow_version
        elif isinstance(weights, KerasV3WeightsDescr):
            dl_framework_version = weights.keras_version
        elif isinstance(weights, OnnxWeightsDescr):
            dl_framework_version = f"opset version: {weights.opset_version}"
        else:
            assert_never(weights)

        if weights.parent is None:
            training_frameworks.append(weights.weights_format_name)

        dl_framework_parts.append(
            f"\n - {weights.weights_format_name}: {dl_framework_version}"
        )

        if model_size is None:
            s = 0
            r = weights.get_reader()
            for chunk in iter(partial(r.read, 128 * 1024), b""):
                s += len(chunk)

            if model.config.bioimageio.model_parameter_count is not None:
                if model.config.bioimageio.model_parameter_count < 1e9:
                    model_size = f"{model.config.bioimageio.model_parameter_count / 1e6:.2f} million parameters, "
                else:
                    model_size = f"{model.config.bioimageio.model_parameter_count / 1e9:.2f} billion parameters, "
            else:
                model_size = ""

            if s < 1e9:
                model_size += f"{s / 1e6:.2f} MB"
            else:
                model_size += f"{s / 1e9:.2f} GB"
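
            # e.g. (hypothetical): "12.35 million parameters, 49.40 MB", or just
            # "49.40 MB" when no model_parameter_count is configured.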

    dl_frameworks = "".join(dl_framework_parts)
    if len(training_frameworks) > 1:
        warnings.warn(
            "Multiple training frameworks detected. (Some weight formats are probably missing a `parent` reference.)"
        )

    if (
        model.weights.pytorch_state_dict is not None
        and model.weights.pytorch_state_dict.dependencies is not None
    ):
        env_reader = model.weights.pytorch_state_dict.dependencies.get_reader()
        dependencies = f"Dependencies for PyTorch state dict weights are listed in [{env_reader.original_file_name}](package/{env_reader.original_file_name})."
    else:
        dependencies = "None beyond the respective framework library."

    out_of_scope_use = (
        model.config.bioimageio.out_of_scope_use
        if model.config.bioimageio.out_of_scope_use
        else """missing; therefore these typical limitations should be considered:

- *Likely not suitable for diagnostic purposes.*
- *Likely not validated for different imaging modalities than present in the training data.*
- *Should not be used without proper validation on user's specific datasets.*

"""
    )

    environmental_impact = model.config.bioimageio.environmental_impact.format_md()
    if environmental_impact:
        environmental_impact_toc_entry = (
            "\n- [Environmental Impact](#environmental-impact)"
        )
    else:
        environmental_impact_toc_entry = ""

    evaluation_parts: List[str] = []
    n_evals = 0
    for e in model.config.bioimageio.evaluations:
        if e.dataset_role == "independent":
            continue  # treated separately below

        n_evals += 1
        n_evals_str = "" if n_evals == 1 else f" {n_evals}"
        evaluation_parts.append(f"\n# Evaluation{n_evals_str}\n")
        evaluation_parts.append(e.format_md())

    n_evals = 0
    for e in model.config.bioimageio.evaluations:
        if e.dataset_role != "independent":
            continue  # treated separately above

        n_evals += 1
        n_evals_str = "" if n_evals == 1 else f" {n_evals}"

        evaluation_parts.append(f"### Validation on External Data{n_evals_str}\n")
        evaluation_parts.append(e.format_md())

    if evaluation_parts:
        evaluation = "\n".join(evaluation_parts)
        evaluation_toc_entry = "\n- [Evaluation](#evaluation)"
    else:
        evaluation = ""
        evaluation_toc_entry = ""

    training_details = ""
    if model.config.bioimageio.training.training_preprocessing:
        training_details += f"### Preprocessing\n\n{model.config.bioimageio.training.training_preprocessing}\n\n"

    training_details += "### Training Hyperparameters\n\n"
    training_details += f"- **Framework:** {' / '.join(training_frameworks)}\n"
    if model.config.bioimageio.training.training_epochs is not None:
        training_details += (
            f"- **Epochs:** {model.config.bioimageio.training.training_epochs}\n"
        )

    if model.config.bioimageio.training.training_batch_size is not None:
        training_details += f"- **Batch size:** {model.config.bioimageio.training.training_batch_size}\n"

    if model.config.bioimageio.training.initial_learning_rate is not None:
        training_details += f"- **Initial learning rate:** {model.config.bioimageio.training.initial_learning_rate}\n"

    if model.config.bioimageio.training.learning_rate_schedule is not None:
        training_details += f"- **Learning rate schedule:** {model.config.bioimageio.training.learning_rate_schedule}\n"

    if model.config.bioimageio.training.loss_function is not None:
        training_details += (
            f"- **Loss function:** {model.config.bioimageio.training.loss_function}"
        )
        if model.config.bioimageio.training.loss_function_kwargs:
            training_details += (
                f" with {model.config.bioimageio.training.loss_function_kwargs}"
            )
        training_details += "\n"

    if model.config.bioimageio.training.optimizer is not None:
        training_details += (
            f"- **Optimizer:** {model.config.bioimageio.training.optimizer}"
        )
        if model.config.bioimageio.training.optimizer_kwargs:
            training_details += (
                f" with {model.config.bioimageio.training.optimizer_kwargs}"
            )
        training_details += "\n"

    if model.config.bioimageio.training.regularization is not None:
        training_details += (
            f"- **Regularization:** {model.config.bioimageio.training.regularization}\n"
        )

    speeds_sizes_times = "### Speeds, Sizes, Times\n\n"
    if model.config.bioimageio.training.training_duration is not None:
        speeds_sizes_times += f"- **Training time:** {model.config.bioimageio.training.training_duration:.2f}\n"

    speeds_sizes_times += f"- **Model size:** {model_size}\n"
    if model.config.bioimageio.inference_time:
        speeds_sizes_times += (
            f"- **Inference time:** {model.config.bioimageio.inference_time}\n"
        )

    if model.config.bioimageio.memory_requirements_inference:
        speeds_sizes_times += f"- **Memory requirements:** {model.config.bioimageio.memory_requirements_inference}\n"

    model_arch_and_objective = "## Model Architecture and Objective\n\n"
    if (
        model.config.bioimageio.architecture_type
        or model.config.bioimageio.architecture_description
    ):
        model_arch_and_objective += (
            f"- **Architecture:** {model.config.bioimageio.architecture_type or ''}"
            + (
                " --- "
                if model.config.bioimageio.architecture_type
                and model.config.bioimageio.architecture_description
                else ""
            )
            + (
                model.config.bioimageio.architecture_description
                if model.config.bioimageio.architecture_description is not None
                else ""
            )
            + "\n"
        )

    io_desc, referenced_files, input_ids, output_ids = _get_io_description(model)
    predict_snippet_inputs = str(
        {input_id: "<path or tensor>" for input_id in input_ids}
    )
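    # e.g. (hypothetical input id "raw"): "{'raw': '<path or tensor>'}", used in
    # the README's predict() snippet below.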
    model_arch_and_objective += io_desc

    hardware_requirements = "\n### Hardware Requirements\n"
    if model.config.bioimageio.memory_requirements_training is not None:
        hardware_requirements += f"- **Training:** GPU memory: {model.config.bioimageio.memory_requirements_training}\n"

    if model.config.bioimageio.memory_requirements_inference is not None:
        hardware_requirements += f"- **Inference:** GPU memory: {model.config.bioimageio.memory_requirements_inference}\n"

    hardware_requirements += f"- **Storage:** Model size: {model_size}\n"

    if model.license is None:
        license = "unknown"
        license_meta = "unknown"
    else:
        spdx_licenses = get_spdx_licenses()
        matches = [
            (entry["name"], entry["reference"])
            for entry in spdx_licenses["licenses"]
            if entry["licenseId"].lower() == model.license.lower()
        ]
        if matches:
            if len(matches) > 1:
                logger.warning(
                    "Multiple SPDX license matches found for '{}', using the first one.",
                    model.license,
                )
            name, reference = matches[0]
            license = f"[{name}]({reference})"
            if model.license.lower() in HF_KNOWN_LICENSES:
                license_meta = model.license.lower()
            else:
                license_meta = f"other\nlicense_name: {model.license.lower()}\nlicense_link: {reference}"
        else:
            if model.license.lower() in HF_KNOWN_LICENSES:
                license_meta = model.license.lower()
            else:
                license_meta = "unknown"

            license = model.license.lower()
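
    # For example (hypothetical): "BSD-3-Clause-Attribution" has an SPDX entry but
    # no Hugging Face license key, so license_meta becomes
    #   other
    #   license_name: bsd-3-clause-attribution
    #   license_link: https://spdx.org/licenses/BSD-3-Clause-Attribution.html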

    base_model = (
        f"\nbase_model: {model.parent.id[len('huggingface/') :]}"
        if model.parent is not None and model.parent.id.startswith("huggingface/")
        else ""
    )
    dataset_meta = (
        f"\ndataset: {model.training_data.id[len('huggingface/') :]}"
        if model.training_data is not None
        and model.training_data.id is not None
        and model.training_data.id.startswith("huggingface/")
        else ""
    )
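    # e.g. (hypothetical id): a parent "huggingface/acme/unet2d" yields
    # "\nbase_model: acme/unet2d" in the YAML front matter.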
    if model.covers:
        cover_image_reader = get_reader(model.covers[0])
        cover_image_bytes = cover_image_reader.read()
        cover_image_filename = f"images/{cover_image_reader.original_file_name}"
        referenced_files[cover_image_filename] = cover_image_bytes
        cover_image_md = f"\n\n![cover image]({cover_image_filename})\n"
        thumbnail_meta = (
            f"\nthumbnail: {cover_image_filename}"  # TODO: fix this to be a proper URL
        )

    else:
        cover_image_md = ""
        thumbnail_meta = ""

    # TODO: add pipeline_tag to metadata
    readme = f"""---
license: {license_meta}{thumbnail_meta}
tags: {list({"biology"}.union(set(model.tags)))}
language: [en]
library_name: bioimageio{base_model}{dataset_meta}
---
# {model.name}{cover_image_md}

{model.description or ""}


# Table of Contents

- [Model Details](#model-details)
- [Uses](#uses)
- [Bias, Risks, and Limitations](#bias-risks-and-limitations)
- [How to Get Started with the Model](#how-to-get-started-with-the-model)
- [Training Details](#training-details){evaluation_toc_entry}{
        environmental_impact_toc_entry
    }
- [Technical Specifications](#technical-specifications)


# Model Details

## Model Description
{model_version}{additional_model_doc}{developed_by}{funded_by}{shared_by}{model_type}{
        model_modality
    }{target_structures}{task_type}
- **License:** {license}{finetuned_from}

## Model Sources

- **Repository:** {repository}
- **Paper:** see [**Developed by**](#model-description)

# Uses

## Direct Use

This model is compatible with the bioimageio.spec Python package (version >= {
        VERSION
    }) and with the bioimageio.core Python package, which supports model inference from Python code or via the `bioimageio` CLI.

```python
from bioimageio.core import predict

output_sample = predict(
    "huggingface/{repo_id}/{model.version or "draft"}",
    inputs={predict_snippet_inputs},
)

output_tensor = output_sample.members["{
        output_ids[0] if output_ids else "<output_id>"
    }"]
xarray_dataarray = output_tensor.data
numpy_ndarray = output_tensor.data.to_numpy()
```

## Downstream Use

Specific bioimage.io partner tool compatibilities may be reported at [Compatibility Reports](https://bioimage-io.github.io/collection/latest/compatibility/#compatibility-by-resource).
{
        "Training (and fine-tuning) code may be available at " + model.git_repo + "."
        if model.git_repo
        else ""
    }

## Out-of-Scope Use

{out_of_scope_use}


{model.config.bioimageio.bias_risks_limitations.format_md()}

# How to Get Started with the Model

You can use "huggingface/{repo_id}/{
        model.version or "draft"
    }" as the resource identifier to load this model directly from the Hugging Face Hub using bioimageio.spec or bioimageio.core.

See [bioimageio.core documentation: Get started](https://bioimage-io.github.io/core-bioimage-io-python/latest/get-started) for instructions on how to load and run this model using the `bioimageio.core` Python package or the bioimageio CLI.

# Training Details

## Training Data

{
        "This model was trained on `" + str(model.training_data.id) + "`."
        if model.training_data is not None
        else "missing"
    }

## Training Procedure

{training_details}

{speeds_sizes_times}
{evaluation}
{environmental_impact}

# Technical Specifications

{model_arch_and_objective}

## Compute Infrastructure

{hardware_requirements}

### Software

- **Framework:** {dl_frameworks}
- **Libraries:** {dependencies}
- **BioImage.IO partner compatibility:** [Compatibility Reports](https://bioimage-io.github.io/collection/latest/compatibility/#compatibility-by-resource)

---

*This model card was created using the template of the bioimageio.spec Python package, which in turn is based on the BioImage Model Zoo template and incorporates best practices from the Hugging Face Model Card Template. For more information on contributing models, visit [bioimage.io](https://bioimage.io).*

---

**References:**

- [Hugging Face Model Card Template](https://huggingface.co/docs/hub/en/model-card-annotated)
- [Hugging Face modelcard_template.md](https://github.com/huggingface/huggingface_hub/blob/b9decfdf9b9a162012bc52f260fd64fc37db660e/src/huggingface_hub/templates/modelcard_template.md)
- [BioImage Model Zoo Documentation](https://bioimage.io/docs/)
- [Model Cards for Model Reporting](https://arxiv.org/abs/1810.03993)
- [bioimageio.spec Python Package](https://bioimage-io.github.io/spec-bioimage-io)
"""

    return readme, referenced_files
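

# Minimal usage sketch (hypothetical paths and repo id; assumes an RDF that loads
# as a v0.5 ModelDescr):
#
#     from pathlib import Path
#     from bioimageio.spec import load_model_description
#     from bioimageio.spec._hf_card import create_huggingface_model_card
#
#     model = load_model_description("my-model.zip")
#     readme, files = create_huggingface_model_card(model, repo_id="acme/unet2d")
#     _ = Path("README.md").write_text(readme, encoding="utf-8")
#     for name, data in files.items():
#         path = Path(name)  # e.g. "images/input_raw_sample.png"
#         path.parent.mkdir(parents=True, exist_ok=True)
#         _ = path.write_bytes(data)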