Coverage for src/bioimageio/core/cli.py: 84%


1"""bioimageio CLI 

2 

3Note: Some docstrings use a hair space ' ' 

4 to place the added '(default: ...)' on a new line. 

5""" 

6 

7import json 

8import shutil 

9import subprocess 

10import sys 

11from abc import ABC 

12from argparse import RawTextHelpFormatter 

13from difflib import SequenceMatcher 

14from functools import cached_property 

15from io import StringIO 

16from pathlib import Path 

17from pprint import pformat, pprint 

18from typing import ( 

19 Annotated, 

20 Any, 

21 Dict, 

22 Iterable, 

23 List, 

24 Literal, 

25 Mapping, 

26 Optional, 

27 Sequence, 

28 Set, 

29 Tuple, 

30 Type, 

31 Union, 

32) 

33 

34import rich.markdown 

35from loguru import logger 

36from pydantic import ( 

37 AliasChoices, 

38 BaseModel, 

39 Field, 

40 PlainSerializer, 

41 WithJsonSchema, 

42 model_validator, 

43) 

44from pydantic_settings import ( 

45 BaseSettings, 

46 CliApp, 

47 CliPositionalArg, 

48 CliSettingsSource, 

49 CliSubCommand, 

50 JsonConfigSettingsSource, 

51 PydanticBaseSettingsSource, 

52 SettingsConfigDict, 

53 YamlConfigSettingsSource, 

54) 

55from tqdm import tqdm 

56from typing_extensions import assert_never 

57 

58import bioimageio.spec 

59from bioimageio.core import __version__ 

60from bioimageio.spec import ( 

61 AnyModelDescr, 

62 InvalidDescr, 

63 ResourceDescr, 

64 load_description, 

65 save_bioimageio_yaml_only, 

66 settings, 

67 update_format, 

68 update_hashes, 

69) 

70from bioimageio.spec._internal.io import is_yaml_value 

71from bioimageio.spec._internal.io_utils import open_bioimageio_yaml 

72from bioimageio.spec._internal.types import FormatVersionPlaceholder, NotEmpty 

73from bioimageio.spec.dataset import DatasetDescr 

74from bioimageio.spec.model import ModelDescr, v0_4, v0_5 

75from bioimageio.spec.notebook import NotebookDescr 

76from bioimageio.spec.utils import ensure_description_is_model, get_reader, write_yaml 

77 

78from .commands import WeightFormatArgAll, WeightFormatArgAny, package, test 

79from .common import MemberId, SampleId, SupportedWeightsFormat 

80from .digest_spec import get_member_ids, load_sample_for_model 

81from .io import load_dataset_stat, save_dataset_stat, save_sample 

82from .prediction import create_prediction_pipeline 

83from .proc_setup import ( 

84 DatasetMeasure, 

85 Measure, 

86 MeasureValue, 

87 StatsCalculator, 

88 get_required_dataset_measures, 

89) 

90from .sample import Sample 

91from .stat_measures import Stat 

92from .utils import compare 

93from .weight_converters._add_weights import add_weights 

94 

95WEIGHT_FORMAT_ALIASES = AliasChoices( 

96 "weight-format", 

97 "weights-format", 

98 "weight_format", 

99 "weights_format", 

100) 
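# Note: all four spellings above are accepted on the command line and are
# normalized to the same field, so e.g. (illustrative) `--weight-format torchscript`
# and `--weights_format torchscript` behave identically.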

101 

102 

103class CmdBase(BaseModel, use_attribute_docstrings=True, cli_implicit_flags=True): 

104 pass 

105 

106 

107class ArgMixin(BaseModel, use_attribute_docstrings=True, cli_implicit_flags=True): 

108 pass 

109 

110 

111class WithSummaryLogging(ArgMixin): 

112 summary: List[Union[Literal["display"], Path]] = Field( 

113 default_factory=lambda: ["display"], 

114 examples=[ 

115 Path("summary.md"), 

116 Path("bioimageio_summaries/"), 

117 ["display", Path("summary.md")], 

118 ], 

119 ) 

120 """Display the validation summary or save it as JSON, Markdown or HTML. 

121 The format is chosen based on the suffix: `.json`, `.md`, `.html`. 

122 If a folder is given (path w/o suffix) the summary is saved in all formats. 

123 Choose/add `"display"` to render the validation summary to the terminal. 

124 """ 
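    # For instance, `summary=["display", Path("summary.md")]` (one of the examples
    # above) renders the summary to the terminal and additionally writes `summary.md`.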

125 

126 def log(self, descr: Union[ResourceDescr, InvalidDescr]): 

127 _ = descr.validation_summary.log(self.summary) 

128 

129 

130class WithSource(ArgMixin): 

131 source: CliPositionalArg[str] 

132 """Url/path to a (folder with a) bioimageio.yaml/rdf.yaml file 

133 or a bioimage.io resource identifier, e.g. 'affable-shark'""" 

134 

135 @cached_property 

136 def descr(self): 

137 return load_description(self.source) 

138 

139 @property 

140 def descr_id(self) -> str: 

141 """a more user-friendly description id 

142 (replacing legacy ids with their nicknames) 

143 """ 

144 if isinstance(self.descr, InvalidDescr): 

145 return str(getattr(self.descr, "id", getattr(self.descr, "name"))) 

146 

147 nickname = None 

148 if ( 

149 isinstance(self.descr.config, v0_5.Config) 

150 and (bio_config := self.descr.config.bioimageio) 

151 and bio_config.model_extra is not None 

152 ): 

153 nickname = bio_config.model_extra.get("nickname") 

154 

155 return str(nickname or self.descr.id or self.descr.name) 

156 

157 

158class ValidateFormatCmd(CmdBase, WithSource, WithSummaryLogging): 

159 """Validate the meta data format of a bioimageio resource.""" 

160 

161 perform_io_checks: bool = Field( 

162 settings.perform_io_checks, alias="perform-io-checks" 

163 ) 

164 """Whether or not to perform validations that require downloading remote files. 

165 Note: Default value is set by `BIOIMAGEIO_PERFORM_IO_CHECKS` environment variable. 

166 """ 

167 

168 @cached_property 

169 def descr(self): 

170 return load_description(self.source, perform_io_checks=self.perform_io_checks) 

171 

172 def cli_cmd(self): 

173 self.log(self.descr) 

174 sys.exit( 

175 0 

176 if self.descr.validation_summary.status in ("valid-format", "passed") 

177 else 1 

178 ) 

179 

180 

181class TestCmd(CmdBase, WithSource, WithSummaryLogging): 

182 """Test a bioimageio resource (beyond meta data formatting).""" 

183 

184 weight_format: WeightFormatArgAll = Field( 

185 "all", 

186 alias="weight-format", 

187 validation_alias=WEIGHT_FORMAT_ALIASES, 

188 ) 

189 """The weight format to limit testing to. 

190 

191 (only relevant for model resources)""" 

192 

193 devices: Optional[List[str]] = None 

194 """Device(s) to use for testing""" 

195 

196 runtime_env: Union[Literal["currently-active", "as-described"], Path] = Field( 

197 "currently-active", alias="runtime-env" 

198 ) 

199 """The python environment to run the tests in 

200 - `"currently-active"`: use active Python interpreter 

201 - `"as-described"`: generate a conda environment YAML file based on the model 

202 weights description. 

203 - A path to a conda environment YAML. 

204 Note: The `bioimageio.core` dependency will be added automatically if not present. 

205 """ 
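    # A hypothetical conda environment file passed via `--runtime-env` could look like:
    #
    #   name: my-test-env
    #   dependencies:
    #     - python=3.12
    #     - pip
    #     - pip:
    #       - torch
    #
    # (`bioimageio.core` is added automatically if missing, as noted above.)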

206 

207 working_dir: Optional[Path] = Field(None, alias="working-dir") 

208 """(for debugging) Directory to save any temporary files.""" 

209 

210 determinism: Literal["seed_only", "full"] = "seed_only" 

211 """Modes to improve reproducibility of test outputs.""" 

212 

213 stop_early: bool = Field( 

214 False, alias="stop-early", validation_alias=AliasChoices("stop-early", "x") 

215 ) 

216 """Do not run further subtests after a failed one.""" 

217 

218 format_version: Union[FormatVersionPlaceholder, str] = Field( 

219 "discover", alias="format-version" 

220 ) 

221 """The format version to use for testing. 

222 - 'latest': Use the latest implemented format version for the given resource type (may trigger auto updating) 

223 - 'discover': Use the format version as described in the resource description 

224 - '0.4', '0.5', ...: Use the specified format version (may trigger auto updating) 

225 """ 

226 

227 def cli_cmd(self): 

228 sys.exit( 

229 test( 

230 self.descr, 

231 weight_format=self.weight_format, 

232 devices=self.devices, 

233 summary=self.summary, 

234 runtime_env=self.runtime_env, 

235 determinism=self.determinism, 

236 format_version=self.format_version, 

237 working_dir=self.working_dir, 

238 ) 

239 ) 

240 

241 

242class PackageCmd(CmdBase, WithSource, WithSummaryLogging): 

243 """Save a resource's metadata with its associated files.""" 

244 

245 path: CliPositionalArg[Path] 

246 """The path to write the (zipped) package to. 

247 If it does not have a `.zip` suffix 

248 this command will save the package as an unzipped folder instead.""" 

249 

250 weight_format: WeightFormatArgAll = Field( 

251 "all", 

252 alias="weight-format", 

253 validation_alias=WEIGHT_FORMAT_ALIASES, 

254 ) 

255 """The weight format to include in the package (for model descriptions only).""" 

256 

257 def cli_cmd(self): 

258 if isinstance(self.descr, InvalidDescr): 

259 self.log(self.descr) 

260 raise ValueError(f"Invalid {self.descr.type} description.") 

261 

262 sys.exit( 

263 package( 

264 self.descr, 

265 self.path, 

266 weight_format=self.weight_format, 

267 ) 

268 ) 

269 

270 

271def _get_stat( 

272 model_descr: AnyModelDescr, 

273 dataset: Iterable[Sample], 

274 dataset_length: int, 

275 stats_path: Path, 

276) -> Mapping[DatasetMeasure, MeasureValue]: 
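    """Get the dataset measures required by `model_descr`.

    Returns an empty mapping if the model requires no dataset measures.
    Otherwise loads precomputed measures from `stats_path` if that file exists
    (raising if a required measure is missing there), or computes them over
    `dataset` and saves them to `stats_path`.
    """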

277 req_dataset_meas, _ = get_required_dataset_measures(model_descr) 

278 if not req_dataset_meas: 

279 return {} 

280 

282 

283 if stats_path.exists(): 

284 logger.info("loading precomputed dataset measures from {}", stats_path) 

285 stat = load_dataset_stat(stats_path) 

286 for m in req_dataset_meas: 

287 if m not in stat: 

288 raise ValueError(f"Missing {m} in {stats_path}") 

289 

290 return stat 

291 

292 stats_calc = StatsCalculator(req_dataset_meas) 

293 

294 for sample in tqdm( 

295 dataset, total=dataset_length, desc="precomputing dataset stats", unit="sample" 

296 ): 

297 stats_calc.update(sample) 

298 

299 stat = stats_calc.finalize() 

300 save_dataset_stat(stat, stats_path) 

301 

302 return stat 

303 

304 

305class UpdateCmdBase(CmdBase, WithSource, ABC): 

306 output: Union[Literal["display", "stdout"], Path] = "display" 

307 """Output updated bioimageio.yaml to the terminal or write to a file. 

308 Notes: 

309 - `"display"`: Render to the terminal with syntax highlighting. 

310 - `"stdout"`: Write to sys.stdout without syntax highlighting. 

311 (More convenient for copying the updated bioimageio.yaml from the terminal.) 

312 """ 

313 

314 diff: Union[bool, Path] = Field(True, alias="diff") 

315 """Output a diff of original and updated bioimageio.yaml. 

316 If a given path has an `.html` extension, a standalone HTML file is written, 

317 otherwise the diff is saved in unified diff format (pure text). 

318 """ 
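    # e.g. (hypothetical invocation): `bioimageio update-format my_rdf.yaml --diff diff.html`
    # would write a standalone HTML diff, whereas `--diff diff.txt` would write a
    # unified text diff.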

319 

320 exclude_unset: bool = Field(True, alias="exclude-unset") 

321 """Exclude fields that have not been explicitly set.""" 

322 

323 exclude_defaults: bool = Field(False, alias="exclude-defaults") 

324 """Exclude fields that have the default value (even if set explicitly).""" 

325 

326 @cached_property 

327 def updated(self) -> Union[ResourceDescr, InvalidDescr]: 

328 raise NotImplementedError 

329 

330 def cli_cmd(self): 

331 original_yaml = open_bioimageio_yaml(self.source).unparsed_content 

332 assert isinstance(original_yaml, str) 

333 stream = StringIO() 

334 

335 save_bioimageio_yaml_only( 

336 self.updated, 

337 stream, 

338 exclude_unset=self.exclude_unset, 

339 exclude_defaults=self.exclude_defaults, 

340 ) 

341 updated_yaml = stream.getvalue() 

342 

343 diff = compare( 

344 original_yaml.split("\n"), 

345 updated_yaml.split("\n"), 

346 diff_format=( 

347 "html" 

348 if isinstance(self.diff, Path) and self.diff.suffix == ".html" 

349 else "unified" 

350 ), 

351 ) 

352 

353 if isinstance(self.diff, Path): 

354 _ = self.diff.write_text(diff, encoding="utf-8") 

355 elif self.diff: 

356 console = rich.console.Console() 

357 diff_md = f"## Diff\n\n````````diff\n{diff}\n````````" 

358 console.print(rich.markdown.Markdown(diff_md)) 

359 

360 if isinstance(self.output, Path): 

361 _ = self.output.write_text(updated_yaml, encoding="utf-8") 

362 logger.info(f"written updated description to {self.output}") 

363 elif self.output == "display": 

364 updated_md = f"## Updated bioimageio.yaml\n\n```yaml\n{updated_yaml}\n```" 

365 rich.console.Console().print(rich.markdown.Markdown(updated_md)) 

366 elif self.output == "stdout": 

367 print(updated_yaml) 

368 else: 

369 assert_never(self.output) 

370 

371 if isinstance(self.updated, InvalidDescr): 

372 logger.warning("Update resulted in invalid description") 

373 _ = self.updated.validation_summary.display() 

374 

375 

376class UpdateFormatCmd(UpdateCmdBase): 

377 """Update the metadata format to the latest format version.""" 

378 

379 exclude_defaults: bool = Field(True, alias="exclude-defaults") 

380 """Exclude fields that have the default value (even if set explicitly). 

381 

382 Note: 

383 The update process sets most unset fields explicitly with their default value. 

384 """ 

385 

386 perform_io_checks: bool = Field( 

387 settings.perform_io_checks, alias="perform-io-checks" 

388 ) 

389 """Whether or not to attempt validation that may require file download. 

390 If `True`, file hash values are added if not present.""" 

391 

392 @cached_property 

393 def updated(self): 

394 return update_format( 

395 self.source, 

396 exclude_defaults=self.exclude_defaults, 

397 perform_io_checks=self.perform_io_checks, 

398 ) 

399 

400 

401class UpdateHashesCmd(UpdateCmdBase): 

402 """Create a bioimageio.yaml description with updated file hashes.""" 

403 

404 @cached_property 

405 def updated(self): 

406 return update_hashes(self.source) 

407 

408 

409class PredictCmd(CmdBase, WithSource): 

410 """Run inference on your data with a bioimage.io model.""" 

411 

412 inputs: NotEmpty[List[Union[str, NotEmpty[List[str]]]]] = Field( 

413 default_factory=lambda: ["{input_id}/001.tif"] 

414 ) 

415 """Model input sample paths (for each input tensor) 

416 

417 The input paths are expected to have shape... 

418 - (n_samples,) or (n_samples,1) for models expecting a single input tensor 

419 - (n_samples,) containing the substring '{input_id}', or 

420 - (n_samples, n_model_inputs) to provide each input tensor path explicitly. 

421 

422 All substrings that are replaced by metadata from the model description: 

423 - '{model_id}' 

424 - '{input_id}' 

425 

426 Example inputs to process samples 'a' and 'b' 

427 for a model expecting a 'raw' and a 'mask' input tensor: 

428 --inputs="[[\\"a_raw.tif\\",\\"a_mask.tif\\"],[\\"b_raw.tif\\",\\"b_mask.tif\\"]]" 

429 (Note that JSON double quotes need to be escaped.) 

430 

431 Alternatively a `bioimageio-cli.yaml` (or `bioimageio-cli.json`) file 

432 may provide the arguments, e.g.: 

433 ```yaml 

434 inputs: 

435 - [a_raw.tif, a_mask.tif] 

436 - [b_raw.tif, b_mask.tif] 

437 ``` 

438 

439 `.npy` and any file extension supported by imageio are supported. 

440 Available formats are listed at 

441 https://imageio.readthedocs.io/en/stable/formats/index.html#all-formats. 

442 Some formats have additional dependencies. 

443 

444 

445 """ 
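    # A hypothetical `bioimageio-cli.yaml` for a single-input model, using the
    # '{input_id}' placeholder with one entry per sample, might look like:
    #
    #   inputs:
    #     - "images/{input_id}/001.tif"
    #     - "images/{input_id}/002.tif"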

446 

447 outputs: Union[str, NotEmpty[Tuple[str, ...]]] = ( 

448 "outputs_{model_id}/{output_id}/{sample_id}.tif" 

449 ) 

450 """Model output path pattern (per output tensor) 

451 

452 All substrings that are replaced: 

453 - '{model_id}' (from model description) 

454 - '{output_id}' (from model description) 

455 - '{sample_id}' (extracted from input paths) 

456 

457 

458 """ 
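    # With the default pattern, a model 'affable-shark' with an output tensor
    # 'prediction' and a sample 'sample01' would write to
    # 'outputs_affable-shark/prediction/sample01.tif' (illustrative ids).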

459 

460 overwrite: bool = False 

461 """allow overwriting existing output files""" 

462 

463 blockwise: bool = False 

464 """process inputs blockwise""" 

465 

466 stats: Annotated[ 

467 Path, 

468 WithJsonSchema({"type": "string"}), 

469 PlainSerializer(lambda p: p.as_posix(), return_type=str), 

470 ] = Path("dataset_statistics.json") 

471 """path to dataset statistics 

472 (will be written if it does not exist, 

473 but the model requires statistical dataset measures) 

474  """ 

475 

476 preview: bool = False 

477 """preview which files would be processed 

478 and what outputs would be generated.""" 

479 

480 weight_format: WeightFormatArgAny = Field( 

481 "any", 

482 alias="weight-format", 

483 validation_alias=WEIGHT_FORMAT_ALIASES, 

484 ) 

485 """The weight format to use.""" 

486 

487 example: bool = False 

488 """generate and run an example 

489 

490 1. downloads example model inputs 

491 2. creates a `{model_id}_example` folder 

492 3. writes input arguments to `{model_id}_example/bioimageio-cli.yaml` 

493 4. executes a preview dry-run 

494 5. executes prediction with example input 

495 

496 

497 """ 
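    # e.g. (hypothetical invocation): `bioimageio predict --example affable-shark`
    # downloads the example inputs, writes `affable-shark_example/bioimageio-cli.yaml`,
    # and runs a preview followed by the actual prediction.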

498 

499 def _example(self): 

500 model_descr = ensure_description_is_model(self.descr) 

501 input_ids = get_member_ids(model_descr.inputs) 

502 example_inputs = ( 

503 model_descr.sample_inputs 

504 if isinstance(model_descr, v0_4.ModelDescr) 

505 else [ 

506 t 

507 for ipt in model_descr.inputs 

508 if (t := ipt.sample_tensor or ipt.test_tensor) 

509 ] 

510 ) 

511 if not example_inputs: 

512 raise ValueError(f"{self.descr_id} does not specify any example inputs.") 

513 

514 inputs001: List[str] = [] 

515 example_path = Path(f"{self.descr_id}_example") 

516 example_path.mkdir(exist_ok=True) 

517 

518 for t, src in zip(input_ids, example_inputs): 

519 reader = get_reader(src) 

520 dst = Path(f"{example_path}/{t}/001{reader.suffix}") 

521 dst.parent.mkdir(parents=True, exist_ok=True) 

522 inputs001.append(dst.as_posix()) 

523 with dst.open("wb") as f: 

524 shutil.copyfileobj(reader, f) 

525 

526 inputs = [inputs001] 

527 output_pattern = f"{example_path}/outputs/{{output_id}}/{{sample_id}}.tif" 

528 

529 bioimageio_cli_path = example_path / YAML_FILE 

530 stats_file = "dataset_statistics.json" 

531 stats = (example_path / stats_file).as_posix() 

532 cli_example_args = dict( 

533 inputs=inputs, 

534 outputs=output_pattern, 

535 stats=stats_file, 

536 blockwise=self.blockwise, 

537 ) 

538 assert is_yaml_value(cli_example_args), cli_example_args 

539 write_yaml( 

540 cli_example_args, 

541 bioimageio_cli_path, 

542 ) 

543 

544 yaml_file_content = None 

545 

546 # escaped double quotes 

547 inputs_json = json.dumps(inputs) 

548 inputs_escaped = inputs_json.replace('"', r"\"") 

549 source_escaped = self.source.replace('"', r"\"") 

550 

551 def get_example_command(preview: bool, escape: bool = False): 

552 q: str = '"' if escape else "" 

553 

554 return [ 

555 "bioimageio", 

556 "predict", 

557 # --no-preview not supported for py=3.8 

558 *(["--preview"] if preview else []), 

559 "--overwrite", 

560 *(["--blockwise"] if self.blockwise else []), 

561 f"--stats={q}{stats}{q}", 

562 f"--inputs={q}{inputs_escaped if escape else inputs_json}{q}", 

563 f"--outputs={q}{output_pattern}{q}", 

564 f"{q}{source_escaped if escape else self.source}{q}", 

565 ] 
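        # The assembled command looks roughly like (paths illustrative):
        #   bioimageio predict --preview --overwrite \
        #     --stats="<example>/dataset_statistics.json" \
        #     --inputs="[[\"<example>/raw/001.tif\"]]" \
        #     --outputs="<example>/outputs/{output_id}/{sample_id}.tif" "<source>"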

566 

567 if Path(YAML_FILE).exists(): 

568 logger.info( 

569 "temporarily removing '{}' to execute example prediction", YAML_FILE 

570 ) 

571 yaml_file_content = Path(YAML_FILE).read_bytes() 

572 Path(YAML_FILE).unlink() 

573 

574 try: 

575 _ = subprocess.run(get_example_command(True), check=True) 

576 _ = subprocess.run(get_example_command(False), check=True) 

577 finally: 

578 if yaml_file_content is not None: 

579 _ = Path(YAML_FILE).write_bytes(yaml_file_content) 

580 logger.debug("restored '{}'", YAML_FILE) 

581 

582 print( 

583 "🎉 Successfully ran example prediction!\n" 

584 + "To predict the example input using the CLI example config file" 

585 + f" {example_path / YAML_FILE}, execute `bioimageio predict` from {example_path}:\n" 

586 + f"$ cd {str(example_path)}\n" 

587 + f'$ bioimageio predict "{source_escaped}"\n\n' 

588 + "Alternatively run the following command" 

589 + " in the current working directory, not the example folder:\n$ " 

590 + " ".join(get_example_command(False, escape=True)) 

591 + f"\n(note that a local '{JSON_FILE}' or '{YAML_FILE}' may interfere with this)" 

592 ) 

593 

594 def cli_cmd(self): 

595 if self.example: 

596 return self._example() 

597 

598 model_descr = ensure_description_is_model(self.descr) 

599 

600 input_ids = get_member_ids(model_descr.inputs) 

601 output_ids = get_member_ids(model_descr.outputs) 

602 

603 minimum_input_ids = tuple( 

604 str(ipt.id) if isinstance(ipt, v0_5.InputTensorDescr) else str(ipt.name) 

605 for ipt in model_descr.inputs 

606 if not isinstance(ipt, v0_5.InputTensorDescr) or not ipt.optional 

607 ) 

608 maximum_input_ids = tuple( 

609 str(ipt.id) if isinstance(ipt, v0_5.InputTensorDescr) else str(ipt.name) 

610 for ipt in model_descr.inputs 

611 ) 

612 

613 def expand_inputs(i: int, ipt: Union[str, Sequence[str]]) -> Tuple[str, ...]: 

614 if isinstance(ipt, str): 

615 ipts = tuple( 

616 ipt.format(model_id=self.descr_id, input_id=t) for t in input_ids 

617 ) 

618 else: 

619 ipts = tuple( 

620 p.format(model_id=self.descr_id, input_id=t) 

621 for t, p in zip(input_ids, ipt) 

622 ) 

623 

624 if len(set(ipts)) < len(ipts): 

625 if len(minimum_input_ids) == len(maximum_input_ids): 

626 n = len(minimum_input_ids) 

627 else: 

628 n = f"{len(minimum_input_ids)}-{len(maximum_input_ids)}" 

629 

630 raise ValueError( 

631 f"[input sample #{i}] Include '{{input_id}}' in path pattern or explicitly specify {n} distinct input paths (got {ipt})" 

632 ) 

633 

634 if len(ipts) < len(minimum_input_ids): 

635 raise ValueError( 

636 f"[input sample #{i}] Expected at least {len(minimum_input_ids)} inputs {minimum_input_ids}, got {ipts}" 

637 ) 

638 

639 if len(ipts) > len(maximum_input_ids): 

640 raise ValueError( 

641 f"Expected at most {len(maximum_input_ids)} inputs {maximum_input_ids}, got {ipts}" 

642 ) 

643 

644 return ipts 

645 
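        # e.g. (illustrative): for a model with inputs ('raw', 'mask'), the pattern
        # "{input_id}/001.tif" expands to ("raw/001.tif", "mask/001.tif"), while an
        # explicit pair ["a_raw.tif", "a_mask.tif"] is passed through unchanged.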

646 inputs = [expand_inputs(i, ipt) for i, ipt in enumerate(self.inputs, start=1)] 

647 

648 sample_paths_in = [ 

649 {t: Path(p) for t, p in zip(input_ids, ipts)} for ipts in inputs 

650 ] 

651 

652 sample_ids = _get_sample_ids(sample_paths_in) 

653 

654 def expand_outputs(): 

655 if isinstance(self.outputs, str): 

656 outputs = [ 

657 tuple( 

658 Path( 

659 self.outputs.format( 

660 model_id=self.descr_id, output_id=t, sample_id=s 

661 ) 

662 ) 

663 for t in output_ids 

664 ) 

665 for s in sample_ids 

666 ] 

667 else: 

668 outputs = [ 

669 tuple( 

670 Path(p.format(model_id=self.descr_id, output_id=t, sample_id=s)) 

671 for t, p in zip(output_ids, self.outputs) 

672 ) 

673 for s in sample_ids 

674 ] 

675 # check for distinctness and correct number within each output sample 

676 for i, out in enumerate(outputs, start=1): 

677 if len(set(out)) < len(out): 

678 raise ValueError( 

679 f"[output sample #{i}] Include '{{output_id}}' in path pattern or explicitly specify {len(output_ids)} distinct output paths (got {out})" 

680 ) 

681 

682 if len(out) != len(output_ids): 

683 raise ValueError( 

684 f"[output sample #{i}] Expected {len(output_ids)} outputs {output_ids}, got {out}" 

685 ) 

686 

687 # check for distinctness across all output samples 

688 all_output_paths = [p for out in outputs for p in out] 

689 if len(set(all_output_paths)) < len(all_output_paths): 

690 raise ValueError( 

691 "Output paths are not distinct across samples. " 

692 + "Make sure to include '{sample_id}' in the output path pattern." 

693 ) 

694 

695 return outputs 

696 

697 outputs = expand_outputs() 

698 

699 sample_paths_out = [ 

700 {MemberId(t): Path(p) for t, p in zip(output_ids, out)} for out in outputs 

701 ] 

702 

703 if not self.overwrite: 

704 for sample_paths in sample_paths_out: 

705 for p in sample_paths.values(): 

706 if p.exists(): 

707 raise FileExistsError( 

708 f"{p} already exists. use --overwrite to (re-)write outputs anyway." 

709 ) 

710 if self.preview: 

711 print("🛈 bioimageio prediction preview structure:") 

712 pprint( 

713 { 

714 "{sample_id}": dict( 

715 inputs={"{input_id}": "<input path>"}, 

716 outputs={"{output_id}": "<output path>"}, 

717 ) 

718 } 

719 ) 

720 print("🔎 bioimageio prediction preview output:") 

721 pprint( 

722 { 

723 s: dict( 

724 inputs={t: p.as_posix() for t, p in sp_in.items()}, 

725 outputs={t: p.as_posix() for t, p in sp_out.items()}, 

726 ) 

727 for s, sp_in, sp_out in zip( 

728 sample_ids, sample_paths_in, sample_paths_out 

729 ) 

730 } 

731 ) 

732 return 

733 

734 def input_dataset(stat: Stat): 

735 for s, sp_in in zip(sample_ids, sample_paths_in): 

736 yield load_sample_for_model( 

737 model=model_descr, 

738 paths=sp_in, 

739 stat=stat, 

740 sample_id=s, 

741 ) 

742 

743 stat: Dict[Measure, MeasureValue] = dict( 

744 _get_stat( 

745 model_descr, input_dataset({}), len(sample_ids), self.stats 

746 ).items() 

747 ) 

748 

749 pp = create_prediction_pipeline( 

750 model_descr, 

751 weight_format=None if self.weight_format == "any" else self.weight_format, 

752 ) 

753 predict_method = ( 

754 pp.predict_sample_with_blocking 

755 if self.blockwise 

756 else pp.predict_sample_without_blocking 

757 ) 

758 

759 for sample_in, sp_out in tqdm( 

760 zip(input_dataset(dict(stat)), sample_paths_out), 

761 total=len(inputs), 

762 desc=f"predict with {self.descr_id}", 

763 unit="sample", 

764 ): 

765 sample_out = predict_method(sample_in) 

766 save_sample(sp_out, sample_out) 

767 

768 

769class AddWeightsCmd(CmdBase, WithSource, WithSummaryLogging): 

770 """Add additional weights to a model description by converting from available formats.""" 

771 

772 output: CliPositionalArg[Path] 

773 """The path to write the updated model package to.""" 

774 

775 source_format: Optional[SupportedWeightsFormat] = Field(None, alias="source-format") 

776 """Exclusively use these weights to convert to other formats.""" 

777 

778 target_format: Optional[SupportedWeightsFormat] = Field(None, alias="target-format") 

779 """Exclusively add this weight format.""" 

780 

781 verbose: bool = False 

782 """Log more (error) output.""" 

783 

784 tracing: bool = True 

785 """Allow tracing when converting pytorch_state_dict to torchscript 

786 (still uses scripting if possible).""" 

787 

788 def cli_cmd(self): 

789 model_descr = ensure_description_is_model(self.descr) 

790 if isinstance(model_descr, v0_4.ModelDescr): 

791 raise TypeError( 

792 f"model format {model_descr.format_version} not supported." 

793 + " Please update the model first." 

794 ) 

795 updated_model_descr = add_weights( 

796 model_descr, 

797 output_path=self.output, 

798 source_format=self.source_format, 

799 target_format=self.target_format, 

800 verbose=self.verbose, 

801 allow_tracing=self.tracing, 

802 ) 

803 self.log(updated_model_descr) 

804 

805 

806JSON_FILE = "bioimageio-cli.json" 

807YAML_FILE = "bioimageio-cli.yaml" 

808 

809 

810class Bioimageio( 

811 BaseSettings, 

812 cli_implicit_flags=True, 

813 cli_parse_args=True, 

814 cli_prog_name="bioimageio", 

815 cli_use_class_docs_for_groups=True, 

816 use_attribute_docstrings=True, 

817): 

818 """bioimageio - CLI for bioimage.io resources 🦒""" 

819 

820 model_config = SettingsConfigDict( 

821 json_file=JSON_FILE, 

822 yaml_file=YAML_FILE, 

823 ) 

824 

825 validate_format: CliSubCommand[ValidateFormatCmd] = Field(alias="validate-format") 

826 "Check a resource's metadata format" 

827 

828 test: CliSubCommand[TestCmd] 

829 "Test a bioimageio resource (beyond meta data formatting)" 

830 

831 package: CliSubCommand[PackageCmd] 

832 "Package a resource" 

833 

834 predict: CliSubCommand[PredictCmd] 

835 "Predict with a model resource" 

836 

837 update_format: CliSubCommand[UpdateFormatCmd] = Field(alias="update-format") 

838 """Update the metadata format""" 

839 

840 update_hashes: CliSubCommand[UpdateHashesCmd] = Field(alias="update-hashes") 

841 """Create a bioimageio.yaml description with updated file hashes.""" 

842 

843 add_weights: CliSubCommand[AddWeightsCmd] = Field(alias="add-weights") 

844 """Add additional weights to a model description by converting from available formats.""" 

845 

846 @classmethod 

847 def settings_customise_sources( 

848 cls, 

849 settings_cls: Type[BaseSettings], 

850 init_settings: PydanticBaseSettingsSource, 

851 env_settings: PydanticBaseSettingsSource, 

852 dotenv_settings: PydanticBaseSettingsSource, 

853 file_secret_settings: PydanticBaseSettingsSource, 

854 ) -> Tuple[PydanticBaseSettingsSource, ...]: 

855 cli: CliSettingsSource[BaseSettings] = CliSettingsSource( 

856 settings_cls, 

857 cli_parse_args=True, 

858 formatter_class=RawTextHelpFormatter, 

859 ) 

860 sys_args = pformat(sys.argv) 

861 logger.info("starting CLI with arguments:\n{}", sys_args) 
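        # Source precedence (earlier entries take priority in pydantic-settings):
        # CLI arguments, then init kwargs, then a local bioimageio-cli.yaml,
        # then bioimageio-cli.json.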

862 return ( 

863 cli, 

864 init_settings, 

865 YamlConfigSettingsSource(settings_cls), 

866 JsonConfigSettingsSource(settings_cls), 

867 ) 

868 

869 @model_validator(mode="before") 

870 @classmethod 

871 def _log(cls, data: Any): 

872 logger.info( 

873 "loaded CLI input:\n{}", 

874 pformat({k: v for k, v in data.items() if v is not None}), 

875 ) 

876 return data 

877 

878 def cli_cmd(self) -> None: 

879 logger.info( 

880 "executing CLI command:\n{}", 

881 pformat({k: v for k, v in self.model_dump().items() if v is not None}), 

882 ) 

883 _ = CliApp.run_subcommand(self) 

884 

885 

886assert isinstance(Bioimageio.__doc__, str) 

887Bioimageio.__doc__ += f""" 

888 

889library versions: 

890 bioimageio.core {__version__} 

891 bioimageio.spec {bioimageio.spec.__version__} 

892 

893spec format versions: 

894 model RDF {ModelDescr.implemented_format_version} 

895 dataset RDF {DatasetDescr.implemented_format_version} 

896 notebook RDF {NotebookDescr.implemented_format_version} 

897 

898""" 

899 

900 

901def _get_sample_ids( 

902 input_paths: Sequence[Mapping[MemberId, Path]], 

903) -> Sequence[SampleId]: 

904 """Get sample ids for given input paths, based on the common path per sample. 

905 

906 Falls back to sample01, sample02, etc..."""
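    # Illustrative example: the input paths {'raw': 'a/raw.tif', 'mask': 'a/mask.tif'}
    # and {'raw': 'b/raw.tif', 'mask': 'b/mask.tif'} yield the sample ids 'a' and 'b'
    # (the common path per sample, shortened until distinct across samples).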

907 

908 matcher = SequenceMatcher() 

909 

910 def get_common_seq(seqs: Sequence[Sequence[str]]) -> Sequence[str]: 

911 """extract a common sequence from multiple sequences 

912 (order sensitive; strips whitespace and slashes) 

913 """ 

914 common = seqs[0] 

915 

916 for seq in seqs[1:]: 

917 if not seq: 

918 continue 

919 matcher.set_seqs(common, seq) 

920 i, _, size = matcher.find_longest_match() 

921 common = common[i : i + size] 

922 

923 if isinstance(common, str): 

924 common = common.strip().strip("/") 

925 else: 

926 common = [cs for c in common if (cs := c.strip().strip("/"))] 

927 

928 if not common: 

929 raise ValueError(f"failed to find common sequence for {seqs}") 

930 

931 return common 

932 

933 def get_shorter_diff(seqs: Sequence[Sequence[str]]) -> List[Sequence[str]]: 

934 """get a shorter sequence whose entries are still unique 

935 (order sensitive, not minimal sequence) 

936 """ 

937 min_seq_len = min(len(s) for s in seqs) 

938 # cut from the start 

939 for start in range(min_seq_len - 1, -1, -1): 

940 shortened = [s[start:] for s in seqs] 

941 if len(set(shortened)) == len(seqs): 

942 min_seq_len -= start 

943 break 

944 else: 

945 seen: Set[Sequence[str]] = set() 

946 dupes = [s for s in seqs if s in seen or seen.add(s)] 

947 raise ValueError(f"Found duplicate entries {dupes}") 

948 

949 # cut from the end 

950 for end in range(min_seq_len - 1, 1, -1): 

951 shortened = [s[:end] for s in shortened] 

952 if len(set(shortened)) == len(seqs): 

953 break 

954 

955 return shortened 

956 

957 full_tensor_ids = [ 

958 sorted( 

959 p.resolve().with_suffix("").as_posix() for p in input_sample_paths.values() 

960 ) 

961 for input_sample_paths in input_paths 

962 ] 

963 try: 

964 long_sample_ids = [get_common_seq(t) for t in full_tensor_ids] 

965 sample_ids = get_shorter_diff(long_sample_ids) 

966 except ValueError as e: 

967 raise ValueError(f"failed to extract sample ids: {e}") 

968 

969 return sample_ids