Coverage for src / bioimageio / spec / _hf_card.py: 82%

296 statements  

« prev     ^ index     » next       coverage.py v7.13.5, created at 2026-03-31 13:09 +0000

1import collections.abc 

2import warnings 

3from functools import partial 

4from pathlib import PurePosixPath 

5from typing import Any, Dict, List, Optional, Sequence, Tuple, Union 

6 

7import numpy as np 

8from imageio.v3 import imwrite # pyright: ignore[reportUnknownVariableType] 

9from loguru import logger 

10from numpy.typing import NDArray 

11from typing_extensions import assert_never 

12 

13from bioimageio.spec._internal.validation_context import get_validation_context 

14from bioimageio.spec.model.v0_5 import ( 

15 IntervalOrRatioDataDescr, 

16 KerasHdf5WeightsDescr, 

17 KerasV3WeightsDescr, 

18 NominalOrOrdinalDataDescr, 

19 OnnxWeightsDescr, 

20 PytorchStateDictWeightsDescr, 

21 TensorflowJsWeightsDescr, 

22 TensorflowSavedModelBundleWeightsDescr, 

23 TensorId, 

24 TorchscriptWeightsDescr, 

25) 

26 

27from ._internal.io import RelativeFilePath, get_reader 

28from ._internal.io_utils import load_array 

29from ._version import VERSION 

30from .model import ModelDescr 

31from .utils import get_spdx_licenses, load_image 

32 

# License identifiers that the Hugging Face Hub natively understands in the
# model-card `license:` metadata field. SPDX ids not in this set are emitted
# as `license: other` with an explicit license_name/license_link instead
# (see create_huggingface_model_card).
HF_KNOWN_LICENSES = (
    "apache-2.0",
    "mit",
    "openrail",
    "bigscience-openrail-m",
    "creativeml-openrail-m",
    "bigscience-bloom-rail-1.0",
    "bigcode-openrail-m",
    "afl-3.0",
    "artistic-2.0",
    "bsl-1.0",
    "bsd",
    "bsd-2-clause",
    "bsd-3-clause",
    "bsd-3-clause-clear",
    "c-uda",
    "cc",
    "cc0-1.0",
    "cc-by-2.0",
    "cc-by-2.5",
    "cc-by-3.0",
    "cc-by-4.0",
    "cc-by-sa-3.0",
    "cc-by-sa-4.0",
    "cc-by-nc-2.0",
    "cc-by-nc-3.0",
    "cc-by-nc-4.0",
    "cc-by-nd-4.0",
    "cc-by-nc-nd-3.0",
    "cc-by-nc-nd-4.0",
    "cc-by-nc-sa-2.0",
    "cc-by-nc-sa-3.0",
    "cc-by-nc-sa-4.0",
    "cdla-sharing-1.0",
    "cdla-permissive-1.0",
    "cdla-permissive-2.0",
    "wtfpl",
    "ecl-2.0",
    "epl-1.0",
    "epl-2.0",
    "etalab-2.0",
    "eupl-1.1",
    "eupl-1.2",
    "agpl-3.0",
    "gfdl",
    "gpl",
    "gpl-2.0",
    "gpl-3.0",
    "lgpl",
    "lgpl-2.1",
    "lgpl-3.0",
    "isc",
    "h-research",
    "intel-research",
    "lppl-1.3c",
    "ms-pl",
    "apple-ascl",
    "apple-amlr",
    "mpl-2.0",
    "odc-by",
    "odbl",
    "openmdw-1.0",
    "openrail++",
    "osl-3.0",
    "postgresql",
    "ofl-1.1",
    "ncsa",
    "unlicense",
    "zlib",
    "pddl",
    "lgpl-lr",
    "deepfloyd-if-license",
    "fair-noncommercial-research-license",
    "llama2",
    "llama3",
    "llama3.1",
    "llama3.2",
    "llama3.3",
    "llama4",
    "grok2-community",
    "gemma",
)

115 

116 

def _generate_png_from_tensor(tensor: NDArray[np.generic]) -> Optional[bytes]:
    """Render a sample tensor as PNG bytes.

    Picks a representative 2D view (middle slices for higher-dimensional
    data), rescales it to uint8 and encodes it as PNG.
    Returns the encoded bytes, or None if no 2D view exists or anything
    goes wrong along the way (best effort by design).
    """
    try:
        squeezed = np.squeeze(tensor)
        ndim = squeezed.ndim

        if ndim < 2:
            # scalars / 1D data: nothing image-like to show
            return None

        if ndim == 2:
            plane = squeezed
        elif ndim == 3:
            if squeezed.shape[-1] in (1, 3, 4):
                # trailing axis looks like color channels -> keep as-is
                plane = squeezed
            else:
                # likely (Z, H, W): show the central slice
                plane = squeezed[squeezed.shape[0] // 2]
        elif ndim == 4:
            # e.g. (batch, z, y, x): central slice of the two leading axes
            lead = 0 if squeezed.shape[0] == 1 else squeezed.shape[0] // 2
            plane = squeezed[lead, squeezed.shape[1] // 2]
        else:
            # ndim > 4: central index along every axis except the last two
            center = tuple(extent // 2 for extent in squeezed.shape[:-2])
            plane = squeezed[center]

        plane = np.squeeze(plane)
        # rescale anything that is not already uint8 into the 0-255 range
        if plane.dtype != np.uint8:
            lo, hi = plane.min(), plane.max()
            scaled: NDArray[Any] = (
                (plane - lo) / (hi - lo) if hi > lo else np.zeros_like(plane)
            )
            plane = (scaled * 255).astype(np.uint8)

        return imwrite("<bytes>", plane, extension=".png")
    except Exception:
        return None

162 

163 

def _get_io_description(
    model: ModelDescr,
) -> Tuple[str, Dict[str, bytes], List[TensorId], List[TensorId]]:
    """Generate a description of model inputs and outputs with sample images.

    Returns:
        A tuple of (markdown_string, referenced_files_dict, input_ids, output_ids)
        where referenced_files_dict maps filenames to file bytes.
    """
    markdown_string = ""
    referenced_files: dict[str, bytes] = {}
    input_ids: List[TensorId] = []
    output_ids: List[TensorId] = []

    def format_data_descr(
        d: Union[
            NominalOrOrdinalDataDescr,
            IntervalOrRatioDataDescr,
            Sequence[Union[NominalOrOrdinalDataDescr, IntervalOrRatioDataDescr]],
        ],
    ) -> str:
        """Render a tensor data description (or a sequence of them) as markdown bullets."""
        ret = ""
        if isinstance(d, NominalOrOrdinalDataDescr):
            ret += f" - Values: {d.values}\n"
        elif isinstance(d, IntervalOrRatioDataDescr):
            ret += f" - Value unit: {d.unit}\n"
            ret += f" - Value scale factor: {d.scale}\n"
            if d.offset is not None:
                ret += f" - Value offset: {d.offset}\n"
            # offset, minimum and maximum are independent fields; report each
            # one that is present (previously an elif chain reported only the
            # first available of the three)
            if d.range[0] is not None:
                ret += f" - Value minimum: {d.range[0]}\n"
            if d.range[1] is not None:
                ret += f" - Value maximum: {d.range[1]}\n"
        elif isinstance(d, collections.abc.Sequence):
            for dd in d:
                ret += format_data_descr(dd)
        else:
            assert_never(d)

        return ret

    def get_sample_png(descr: Any, role: str) -> Optional[bytes]:
        """Best-effort PNG preview for one tensor.

        Prefers `sample_tensor`, falls back to `test_tensor`; returns None if
        neither yields an image (errors are logged, not raised).
        """
        png: Optional[bytes] = None
        if descr.sample_tensor is not None:
            try:
                png = _generate_png_from_tensor(load_image(descr.sample_tensor))
            except Exception as e:
                logger.error("failed to generate {} sample image: {}", role, e)

        if png is None and descr.test_tensor is not None:
            try:
                png = _generate_png_from_tensor(load_array(descr.test_tensor))
            except Exception as e:
                logger.error(
                    "failed to generate {} sample image from test data: {}", role, e
                )

        return png

    # Input descriptions
    if model.inputs:
        markdown_string += "\n- **Input specifications:**\n"

        for inp in model.inputs:
            input_ids.append(inp.id)
            axes_str = ", ".join(str(a.id) for a in inp.axes)
            # axis sizes may be plain ints or richer size descriptors;
            # str() covers both (the previous ternary had identical branches)
            shape_str = " × ".join(str(a.size) for a in inp.axes)

            markdown_string += f" `{inp.id}`: {inp.description or ''}\n\n"
            markdown_string += f" - Axes: `{axes_str}`\n"
            markdown_string += f" - Shape: `{shape_str}`\n"
            markdown_string += f" - Data type: `{inp.dtype}`\n"
            markdown_string += format_data_descr(inp.data)

            img_bytes = get_sample_png(inp, "input")
            if img_bytes:
                filename = f"images/input_{inp.id}_sample.png"
                referenced_files[filename] = img_bytes
                # link to the file we just registered in referenced_files
                markdown_string += f" - example\n ![{inp.id} sample]({filename})\n"

    # Output descriptions
    if model.outputs:
        markdown_string += "\n- **Output specifications:**\n"
        for out in model.outputs:
            output_ids.append(out.id)
            axes_str = ", ".join(str(a.id) for a in out.axes)
            shape_str = " × ".join(str(a.size) for a in out.axes)

            markdown_string += f" `{out.id}`: {out.description or ''}\n"
            markdown_string += f" - Axes: `{axes_str}`\n"
            markdown_string += f" - Shape: `{shape_str}`\n"
            markdown_string += f" - Data type: `{out.dtype}`\n"
            markdown_string += format_data_descr(out.data)

            img_bytes = get_sample_png(out, "output")
            if img_bytes:
                filename = f"images/output_{out.id}_sample.png"
                referenced_files[filename] = img_bytes
                # fixed: the image link was missing the markdown `![` prefix
                # (the input branch already had it)
                markdown_string += f" - example\n ![{out.id} sample]({filename})\n"

    return markdown_string, referenced_files, input_ids, output_ids

287 

288 

def create_huggingface_model_card(
    model: ModelDescr, *, repo_id: str
) -> Tuple[str, Dict[str, bytes]]:
    """Create a Hugging Face model card for a BioImage.IO model.

    Args:
        model: The model description to render.
        repo_id: The Hugging Face repository id the card is destined for
            (used in the usage snippets).

    Returns:
        A tuple of (markdown_string, images_dict) where images_dict maps
        filenames to file bytes that should be saved alongside the markdown.
    """
    if model.version is None:
        model_version = ""
    else:
        model_version = f"\n- **model version:** {model.version}"

    if model.documentation is None:
        additional_model_doc = ""
    else:
        # Point the documentation field at the file's location inside the
        # uploaded package; work on a copy so the caller's model is untouched.
        doc_reader = get_reader(model.documentation)
        local_doc_path = f"package/{doc_reader.original_file_name}"
        model = model.model_copy()
        with get_validation_context().replace(perform_io_checks=False):
            model.documentation = RelativeFilePath(PurePosixPath(local_doc_path))

        additional_model_doc = f"\n- **Additional model documentation:** [{local_doc_path}]({local_doc_path})"

    if model.cite:
        developed_by = "\n- **Developed by:** " + (
            "".join(
                (
                    f"\n - {c.text}: "
                    + (f"https://www.doi.org/{c.doi}" if c.doi else str(c.url))
                )
                for c in model.cite
            )
        )
    else:
        developed_by = ""

    if model.config.bioimageio.funded_by:
        funded_by = f"\n- **Funded by:** {model.config.bioimageio.funded_by}"
    else:
        funded_by = ""

    if model.authors:
        shared_by = "\n- **Shared by:** " + (
            "".join(
                (
                    f"\n - {a.name}"
                    + (f", {a.affiliation}" if a.affiliation else "")
                    + (
                        f", [https://orcid.org/{a.orcid}](https://orcid.org/{a.orcid})"
                        if a.orcid
                        else ""
                    )
                    + (
                        f", [https://github.com/{a.github_user}](https://github.com/{a.github_user})"
                        if a.github_user
                        else ""
                    )
                    for a in model.authors
                )
            )
        )
    else:
        shared_by = ""

    if model.config.bioimageio.architecture_type:
        model_type = f"\n- **Model type:** {model.config.bioimageio.architecture_type}"
    else:
        model_type = ""

    if model.config.bioimageio.modality:
        model_modality = f"\n- **Modality:** {model.config.bioimageio.modality}"
    else:
        model_modality = ""

    if model.config.bioimageio.target_structure:
        target_structures = "\n- **Target structures:** " + ", ".join(
            model.config.bioimageio.target_structure
        )
    else:
        target_structures = ""

    if model.config.bioimageio.task:
        task_type = f"\n- **Task type:** {model.config.bioimageio.task}"
    else:
        task_type = ""

    if model.parent:
        finetuned_from = f"\n- **Finetuned from model:** {model.parent.id}"
    else:
        finetuned_from = ""

    repository = (
        f"[{model.git_repo}]({model.git_repo})" if model.git_repo else "missing"
    )

    dl_framework_parts: List[str] = []
    training_frameworks: List[str] = []
    model_size: Optional[str] = None
    for weights in model.weights.available_formats.values():
        if isinstance(weights, (PytorchStateDictWeightsDescr, TorchscriptWeightsDescr)):
            dl_framework_version = weights.pytorch_version
        elif isinstance(
            weights,
            (
                TensorflowSavedModelBundleWeightsDescr,
                TensorflowJsWeightsDescr,
                KerasHdf5WeightsDescr,
            ),
        ):
            dl_framework_version = weights.tensorflow_version
        elif isinstance(weights, KerasV3WeightsDescr):
            dl_framework_version = weights.keras_version
        elif isinstance(weights, OnnxWeightsDescr):
            dl_framework_version = f"opset version: {weights.opset_version}"
        else:
            assert_never(weights)

        # weights without a `parent` are the ones the model was trained in
        if weights.parent is None:
            training_frameworks.append(weights.weights_format_name)

        dl_framework_parts.append(
            f"\n - {weights.weights_format_name}: {dl_framework_version}"
        )

        # determine the model size once, from the first weights entry
        if model_size is None:
            s = 0
            r = weights.get_reader()
            for chunk in iter(partial(r.read, 128 * 1024), b""):
                s += len(chunk)

            if model.config.bioimageio.model_parameter_count is not None:
                if model.config.bioimageio.model_parameter_count < 1e9:
                    model_size = f"{model.config.bioimageio.model_parameter_count / 1e6:.2f} million parameters, "
                else:
                    model_size = f"{model.config.bioimageio.model_parameter_count / 1e9:.2f} billion parameters, "
            else:
                model_size = ""

            if s < 1e9:
                model_size += f"{s / 1e6:.2f} MB"
            else:
                model_size += f"{s / 1e9:.2f} GB"

    dl_frameworks = "".join(dl_framework_parts)
    if len(training_frameworks) > 1:
        warnings.warn(
            "Multiple training frameworks detected. (Some weight formats are probably missing a `parent` reference.)"
        )

    if (
        model.weights.pytorch_state_dict is not None
        and model.weights.pytorch_state_dict.dependencies is not None
    ):
        env_reader = model.weights.pytorch_state_dict.dependencies.get_reader()
        dependencies = f"Dependencies for Pytorch State dict weights are listed in [{env_reader.original_file_name}](package/{env_reader.original_file_name})."
    else:
        dependencies = "None beyond the respective framework library."

    out_of_scope_use = (
        model.config.bioimageio.out_of_scope_use
        if model.config.bioimageio.out_of_scope_use
        else """missing; therefore these typical limitations should be considered:

- *Likely not suitable for diagnostic purposes.*
- *Likely not validated for different imaging modalities than present in the training data.*
- *Should not be used without proper validation on user's specific datasets.*

"""
    )

    environmental_impact = model.config.bioimageio.environmental_impact.format_md()
    if environmental_impact:
        environmental_impact_toc_entry = (
            "\n- [Environmental Impact](#environmental-impact)"
        )
    else:
        environmental_impact_toc_entry = ""

    # evaluations on non-independent data first, then independent validation
    evaluation_parts: List[str] = []
    n_evals = 0
    for e in model.config.bioimageio.evaluations:
        if e.dataset_role == "independent":
            continue  # treated separately below

        n_evals += 1
        n_evals_str = "" if n_evals == 1 else f" {n_evals}"
        evaluation_parts.append(f"\n# Evaluation{n_evals_str}\n")
        evaluation_parts.append(e.format_md())

    n_evals = 0
    for e in model.config.bioimageio.evaluations:
        if e.dataset_role != "independent":
            continue  # treated separately above

        n_evals += 1
        n_evals_str = "" if n_evals == 1 else f" {n_evals}"

        evaluation_parts.append(f"### Validation on External Data{n_evals_str}\n")
        evaluation_parts.append(e.format_md())

    if evaluation_parts:
        evaluation = "\n".join(evaluation_parts)
        evaluation_toc_entry = "\n- [Evaluation](#evaluation)"
    else:
        evaluation = ""
        evaluation_toc_entry = ""

    training_details = ""
    if model.config.bioimageio.training.training_preprocessing:
        training_details += f"### Preprocessing\n\n{model.config.bioimageio.training.training_preprocessing}\n\n"

    training_details += "### Training Hyperparameters\n\n"
    # fixed: trailing newline was missing, which glued the next bullet
    # onto the Framework line
    training_details += f"- **Framework:** {' / '.join(training_frameworks)}\n"
    if model.config.bioimageio.training.training_epochs is not None:
        training_details += (
            f"- **Epochs:** {model.config.bioimageio.training.training_epochs}\n"
        )

    if model.config.bioimageio.training.training_batch_size is not None:
        training_details += f"- **Batch size:** {model.config.bioimageio.training.training_batch_size}\n"

    if model.config.bioimageio.training.initial_learning_rate is not None:
        training_details += f"- **Initial learning rate:** {model.config.bioimageio.training.initial_learning_rate}\n"

    if model.config.bioimageio.training.learning_rate_schedule is not None:
        training_details += f"- **Learning rate schedule:** {model.config.bioimageio.training.learning_rate_schedule}\n"

    if model.config.bioimageio.training.loss_function is not None:
        training_details += (
            f"- **Loss function:** {model.config.bioimageio.training.loss_function}"
        )
        if model.config.bioimageio.training.loss_function_kwargs:
            training_details += (
                f" with {model.config.bioimageio.training.loss_function_kwargs}"
            )
        training_details += "\n"

    if model.config.bioimageio.training.optimizer is not None:
        training_details += (
            f"- **Optimizer:** {model.config.bioimageio.training.optimizer}"
        )
        if model.config.bioimageio.training.optimizer_kwargs:
            training_details += (
                f" with {model.config.bioimageio.training.optimizer_kwargs}"
            )
        training_details += "\n"

    if model.config.bioimageio.training.regularization is not None:
        training_details += (
            f"- **Regularization:** {model.config.bioimageio.training.regularization}\n"
        )

    speeds_sizes_times = "### Speeds, Sizes, Times\n\n"
    if model.config.bioimageio.training.training_duration is not None:
        speeds_sizes_times += f"- **Training time:** {model.config.bioimageio.training.training_duration:.2f}\n"

    speeds_sizes_times += f"- **Model size:** {model_size}\n"
    if model.config.bioimageio.inference_time:
        speeds_sizes_times += (
            f"- **Inference time:** {model.config.bioimageio.inference_time}\n"
        )

    if model.config.bioimageio.memory_requirements_inference:
        speeds_sizes_times += f"- **Memory requirements:** {model.config.bioimageio.memory_requirements_inference}\n"

    model_arch_and_objective = "## Model Architecture and Objective\n\n"
    if (
        model.config.bioimageio.architecture_type
        or model.config.bioimageio.architecture_description
    ):
        model_arch_and_objective += (
            f"- **Architecture:** {model.config.bioimageio.architecture_type or ''}"
            + (
                " --- "
                if model.config.bioimageio.architecture_type
                and model.config.bioimageio.architecture_description
                else ""
            )
            + (
                model.config.bioimageio.architecture_description
                if model.config.bioimageio.architecture_description is not None
                else ""
            )
            + "\n"
        )

    io_desc, referenced_files, input_ids, output_ids = _get_io_description(model)
    predict_snippet_inputs = str(
        {input_id: "<path or tensor>" for input_id in input_ids}
    )
    model_arch_and_objective += io_desc

    hardware_requirements = "\n### Hardware Requirements\n"
    if model.config.bioimageio.memory_requirements_training is not None:
        hardware_requirements += f"- **Training:** GPU memory: {model.config.bioimageio.memory_requirements_training}\n"

    if model.config.bioimageio.memory_requirements_inference is not None:
        hardware_requirements += f"- **Inference:** GPU memory: {model.config.bioimageio.memory_requirements_inference}\n"

    hardware_requirements += f"- **Storage:** Model size: {model_size}\n"

    # `license` is the human-readable markdown string; `license_meta` goes
    # into the YAML front matter and must be a HF-known id, an
    # other/license_name/license_link triple, or "unknown".
    if model.license is None:
        license = "unknown"
        license_meta = "unknown"
    else:
        spdx_licenses = get_spdx_licenses()
        matches = [
            (entry["name"], entry["reference"])
            for entry in spdx_licenses["licenses"]
            if entry["licenseId"].lower() == model.license.lower()
        ]
        if matches:
            if len(matches) > 1:
                logger.warning(
                    "Multiple SPDX license matches found for '{}', using the first one.",
                    model.license,
                )
            name, reference = matches[0]
            license = f"[{name}]({reference})"
            if model.license.lower() in HF_KNOWN_LICENSES:
                license_meta = model.license.lower()
            else:
                license_meta = f"other\nlicense_name: {model.license.lower()}\nlicense_link: {reference}"
        else:
            if model.license.lower() in HF_KNOWN_LICENSES:
                license_meta = model.license.lower()
            else:
                license_meta = "unknown"

            license = model.license.lower()

    base_model = (
        f"\nbase_model: {model.parent.id[len('huggingface/') :]}"
        if model.parent is not None and model.parent.id.startswith("huggingface/")
        else ""
    )
    dataset_meta = (
        f"\ndataset: {model.training_data.id[len('huggingface/') :]}"
        if model.training_data is not None
        and model.training_data.id is not None
        and model.training_data.id.startswith("huggingface/")
        else ""
    )
    if model.covers:
        # use the first cover image as the card's hero image and thumbnail
        cover_image_reader = get_reader(model.covers[0])
        cover_image_bytes = cover_image_reader.read()
        cover_image_filename = f"images/{cover_image_reader.original_file_name}"
        referenced_files[cover_image_filename] = cover_image_bytes
        cover_image_md = f"\n![cover image]({cover_image_filename})\n\n"
        thumbnail_meta = (
            f"\nthumbnail: {cover_image_filename}"  # TODO: fix this to be a proper URL
        )

    else:
        cover_image_md = ""
        thumbnail_meta = ""

    # TODO: add pipeline_tag to metadata
    # tags are sorted so the generated card is deterministic
    readme = f"""---
license: {license_meta}{thumbnail_meta}
tags: {sorted({"biology"}.union(model.tags))}
language: [en]
library_name: bioimageio{base_model}{dataset_meta}
---
# {model.name}{cover_image_md}

{model.description or ""}


# Table of Contents

- [Model Details](#model-details)
- [Uses](#uses)
- [Bias, Risks, and Limitations](#bias-risks-and-limitations)
- [How to Get Started with the Model](#how-to-get-started-with-the-model)
- [Training Details](#training-details){evaluation_toc_entry}{
        environmental_impact_toc_entry
    }
- [Technical Specifications](#technical-specifications)


# Model Details

## Model Description
{model_version}{additional_model_doc}{developed_by}{funded_by}{shared_by}{model_type}{
        model_modality
    }{target_structures}{task_type}
- **License:** {license}{finetuned_from}

## Model Sources

- **Repository:** {repository}
- **Paper:** see [**Developed by**](#model-description)

# Uses

## Direct Use

This model is compatible with the bioimageio.spec Python package (version >= {
        VERSION
    }) and the bioimageio.core Python package supporting model inference in Python code or via the `bioimageio` CLI.

```python
from bioimageio.core import predict

output_sample = predict(
    "huggingface/{repo_id}/{model.version or "draft"}",
    inputs={predict_snippet_inputs},
)

output_tensor = output_sample.members["{
        output_ids[0] if output_ids else "<output_id>"
    }"]
xarray_dataarray = output_tensor.data
numpy_ndarray = output_tensor.data.to_numpy()
```

## Downstream Use

Specific bioimage.io partner tool compatibilities may be reported at [Compatibility Reports](https://bioimage-io.github.io/collection/latest/compatibility/#compatibility-by-resource).
{
        "Training (and fine-tuning) code may be available at " + model.git_repo + "."
        if model.git_repo
        else ""
    }

## Out-of-Scope Use

{out_of_scope_use}


{model.config.bioimageio.bias_risks_limitations.format_md()}

# How to Get Started with the Model

You can use "huggingface/{repo_id}/{
        model.version or "draft"
    }" as the resource identifier to load this model directly from the Hugging Face Hub using bioimageio.spec or bioimageio.core.

See [bioimageio.core documentation: Get started](https://bioimage-io.github.io/core-bioimage-io-python/latest/get-started) for instructions on how to load and run this model using the `bioimageio.core` Python package or the bioimageio CLI.

# Training Details

## Training Data

{
        "This model was trained on `" + str(model.training_data.id) + "`."
        if model.training_data is not None
        else "missing"
    }

## Training Procedure

{training_details}

{speeds_sizes_times}
{evaluation}
{environmental_impact}

# Technical Specifications

{model_arch_and_objective}

## Compute Infrastructure

{hardware_requirements}

### Software

- **Framework:** {dl_frameworks}
- **Libraries:** {dependencies}
- **BioImage.IO partner compatibility:** [Compatibility Reports](https://bioimage-io.github.io/collection/latest/compatibility/#compatibility-by-resource)

---

*This model card was created using the template of the bioimageio.spec Python Package, which in turn is based on the BioImage Model Zoo template, incorporating best practices from the Hugging Face Model Card Template. For more information on contributing models, visit [bioimage.io](https://bioimage.io).*

---

**References:**

- [Hugging Face Model Card Template](https://huggingface.co/docs/hub/en/model-card-annotated)
- [Hugging Face modelcard_template.md](https://github.com/huggingface/huggingface_hub/blob/b9decfdf9b9a162012bc52f260fd64fc37db660e/src/huggingface_hub/templates/modelcard_template.md)
- [BioImage Model Zoo Documentation](https://bioimage.io/docs/)
- [Model Cards for Model Reporting](https://arxiv.org/abs/1810.03993)
- [bioimageio.spec Python Package](https://bioimage-io.github.io/spec-bioimage-io)
"""

    return readme, referenced_files