Coverage for bioimageio/core/cli.py: 83%
368 statements
coverage.py v7.9.2, created at 2025-07-16 15:20 +0000
1"""bioimageio CLI
3Note: Some docstrings use a hair space ' '
4 to place the added '(default: ...)' on a new line.
5"""
7import json
8import shutil
9import subprocess
10import sys
11from abc import ABC
12from argparse import RawTextHelpFormatter
13from difflib import SequenceMatcher
14from functools import cached_property
15from io import StringIO
16from pathlib import Path
17from pprint import pformat, pprint
18from typing import (
19 Any,
20 Dict,
21 Iterable,
22 List,
23 Literal,
24 Mapping,
25 Optional,
26 Sequence,
27 Set,
28 Tuple,
29 Type,
30 Union,
31)
33import rich.markdown
34from loguru import logger
35from pydantic import AliasChoices, BaseModel, Field, model_validator
36from pydantic_settings import (
37 BaseSettings,
38 CliPositionalArg,
39 CliSettingsSource,
40 CliSubCommand,
41 JsonConfigSettingsSource,
42 PydanticBaseSettingsSource,
43 SettingsConfigDict,
44 YamlConfigSettingsSource,
45)
46from tqdm import tqdm
47from typing_extensions import assert_never
49import bioimageio.spec
50from bioimageio.spec import (
51 AnyModelDescr,
52 InvalidDescr,
53 ResourceDescr,
54 load_description,
55 save_bioimageio_yaml_only,
56 settings,
57 update_format,
58 update_hashes,
59)
60from bioimageio.spec._internal.io import is_yaml_value
61from bioimageio.spec._internal.io_utils import open_bioimageio_yaml
62from bioimageio.spec._internal.types import FormatVersionPlaceholder, NotEmpty
63from bioimageio.spec.dataset import DatasetDescr
64from bioimageio.spec.model import ModelDescr, v0_4, v0_5
65from bioimageio.spec.notebook import NotebookDescr
66from bioimageio.spec.utils import ensure_description_is_model, get_reader, write_yaml
68from .commands import WeightFormatArgAll, WeightFormatArgAny, package, test
69from .common import MemberId, SampleId, SupportedWeightsFormat
70from .digest_spec import get_member_ids, load_sample_for_model
71from .io import load_dataset_stat, save_dataset_stat, save_sample
72from .prediction import create_prediction_pipeline
73from .proc_setup import (
74 DatasetMeasure,
75 Measure,
76 MeasureValue,
77 StatsCalculator,
78 get_required_dataset_measures,
79)
80from .sample import Sample
81from .stat_measures import Stat
82from .utils import VERSION, compare
83from .weight_converters._add_weights import add_weights
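# Accept hyphenated/underscored and singular/plural spellings of the weight format option.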
85WEIGHT_FORMAT_ALIASES = AliasChoices(
86 "weight-format",
87 "weights-format",
88 "weight_format",
89 "weights_format",
90)
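# Shared pydantic base classes for commands and argument mixins:
# attribute docstrings become CLI help texts and boolean fields become implicit flags.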
93class CmdBase(BaseModel, use_attribute_docstrings=True, cli_implicit_flags=True):
94 pass
97class ArgMixin(BaseModel, use_attribute_docstrings=True, cli_implicit_flags=True):
98 pass
101class WithSummaryLogging(ArgMixin):
102 summary: Sequence[Union[Literal["display"], Path]] = Field(
103 ("display",),
104 examples=[
105 "display",
106 Path("summary.md"),
107 Path("bioimageio_summaries/"),
108 ["display", Path("summary.md")],
109 ],
110 )
111 """Display the validation summary or save it as JSON, Markdown or HTML.
112 The format is chosen based on the suffix: `.json`, `.md`, `.html`.
113 If a folder is given (path w/o suffix) the summary is saved in all formats.
114 Choose/add `"display"` to render the validation summary to the terminal.
115 """
117 def log(self, descr: Union[ResourceDescr, InvalidDescr]):
118 _ = descr.validation_summary.log(self.summary)
121class WithSource(ArgMixin):
122 source: CliPositionalArg[str]
123 """Url/path to a (folder with a) bioimageio.yaml/rdf.yaml file
124 or a bioimage.io resource identifier, e.g. 'affable-shark'"""
126 @cached_property
127 def descr(self):
128 return load_description(self.source)
130 @property
131 def descr_id(self) -> str:
132 """a more user-friendly description id
133 (replacing legacy ids with their nicknames)
134 """
135 if isinstance(self.descr, InvalidDescr):
136 return str(getattr(self.descr, "id", getattr(self.descr, "name")))
138 nickname = None
139 if (
140 isinstance(self.descr.config, v0_5.Config)
141 and (bio_config := self.descr.config.bioimageio)
142 and bio_config.model_extra is not None
143 ):
144 nickname = bio_config.model_extra.get("nickname")
146 return str(nickname or self.descr.id or self.descr.name)
149class ValidateFormatCmd(CmdBase, WithSource, WithSummaryLogging):
150 """Validate the meta data format of a bioimageio resource."""
152 perform_io_checks: bool = Field(
153 settings.perform_io_checks, alias="perform-io-checks"
154 )
155 """Wether or not to perform validations that requires downloading remote files.
156 Note: Default value is set by `BIOIMAGEIO_PERFORM_IO_CHECKS` environment variable.
157 """
159 @cached_property
160 def descr(self):
161 return load_description(self.source, perform_io_checks=self.perform_io_checks)
163 def run(self):
164 self.log(self.descr)
165 sys.exit(
166 0
167 if self.descr.validation_summary.status in ("valid-format", "passed")
168 else 1
169 )
172class TestCmd(CmdBase, WithSource, WithSummaryLogging):
173 """Test a bioimageio resource (beyond meta data formatting)."""
175 weight_format: WeightFormatArgAll = Field(
176 "all",
177 alias="weight-format",
178 validation_alias=WEIGHT_FORMAT_ALIASES,
179 )
180 """The weight format to limit testing to.
182 (only relevant for model resources)"""
184 devices: Optional[Union[str, Sequence[str]]] = None
185 """Device(s) to use for testing"""
187 runtime_env: Union[Literal["currently-active", "as-described"], Path] = Field(
188 "currently-active", alias="runtime-env"
189 )
190 """The python environment to run the tests in
191 - `"currently-active"`: use active Python interpreter
192 - `"as-described"`: generate a conda environment YAML file based on the model
193 weights description.
194 - A path to a conda environment YAML.
195 Note: The `bioimageio.core` dependency will be added automatically if not present.
196 """
198 determinism: Literal["seed_only", "full"] = "seed_only"
199 """Modes to improve reproducibility of test outputs."""
201 stop_early: bool = Field(
202 False, alias="stop-early", validation_alias=AliasChoices("stop-early", "x")
203 )
204 """Do not run further subtests after a failed one."""
206 format_version: Union[FormatVersionPlaceholder, str] = Field(
207 "discover", alias="format-version"
208 )
209 """The format version to use for testing.
210 - 'latest': Use the latest implemented format version for the given resource type (may trigger auto updating)
211 - 'discover': Use the format version as described in the resource description
212 - '0.4', '0.5', ...: Use the specified format version (may trigger auto updating)
213 """
215 def run(self):
216 sys.exit(
217 test(
218 self.descr,
219 weight_format=self.weight_format,
220 devices=self.devices,
221 summary=self.summary,
222 runtime_env=self.runtime_env,
223 determinism=self.determinism,
224 format_version=self.format_version,
225 )
226 )
229class PackageCmd(CmdBase, WithSource, WithSummaryLogging):
230 """Save a resource's metadata with its associated files."""
232 path: CliPositionalArg[Path]
233 """The path to write the (zipped) package to.
234 If it does not have a `.zip` suffix
235 this command will save the package as an unzipped folder instead."""
237 weight_format: WeightFormatArgAll = Field(
238 "all",
239 alias="weight-format",
240 validation_alias=WEIGHT_FORMAT_ALIASES,
241 )
242 """The weight format to include in the package (for model descriptions only)."""
244 def run(self):
245 if isinstance(self.descr, InvalidDescr):
246 self.log(self.descr)
247 raise ValueError(f"Invalid {self.descr.type} description.")
249 sys.exit(
250 package(
251 self.descr,
252 self.path,
253 weight_format=self.weight_format,
254 )
255 )
258def _get_stat(
259 model_descr: AnyModelDescr,
260 dataset: Iterable[Sample],
261 dataset_length: int,
262 stats_path: Path,
263) -> Mapping[DatasetMeasure, MeasureValue]:
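"""Load precomputed dataset measures from `stats_path` if it exists;
otherwise compute them over `dataset` and save them to `stats_path`."""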
264 req_dataset_meas, _ = get_required_dataset_measures(model_descr)
265 if not req_dataset_meas:
266 return {}
270 if stats_path.exists():
271 logger.info("loading precomputed dataset measures from {}", stats_path)
272 stat = load_dataset_stat(stats_path)
273 for m in req_dataset_meas:
274 if m not in stat:
275 raise ValueError(f"Missing {m} in {stats_path}")
277 return stat
279 stats_calc = StatsCalculator(req_dataset_meas)
281 for sample in tqdm(
282 dataset, total=dataset_length, desc="precomputing dataset stats", unit="sample"
283 ):
284 stats_calc.update(sample)
286 stat = stats_calc.finalize()
287 save_dataset_stat(stat, stats_path)
289 return stat
292class UpdateCmdBase(CmdBase, WithSource, ABC):
293 output: Union[Literal["display", "stdout"], Path] = "display"
294 """Output updated bioimageio.yaml to the terminal or write to a file.
295 Notes:
296 - `"display"`: Render to the terminal with syntax highlighting.
297 - `"stdout"`: Write to sys.stdout without syntax highligthing.
298 (More convenient for copying the updated bioimageio.yaml from the terminal.)
299 """
301 diff: Union[bool, Path] = Field(True, alias="diff")
302 """Output a diff of original and updated bioimageio.yaml.
303 If a given path has an `.html` extension, a standalone HTML file is written,
304 otherwise the diff is saved in unified diff format (pure text).
305 """
307 exclude_unset: bool = Field(True, alias="exclude-unset")
308 """Exclude fields that have not explicitly be set."""
310 exclude_defaults: bool = Field(False, alias="exclude-defaults")
311 """Exclude fields that have the default value (even if set explicitly)."""
313 @cached_property
314 def updated(self) -> Union[ResourceDescr, InvalidDescr]:
315 raise NotImplementedError
317 def run(self):
318 original_yaml = open_bioimageio_yaml(self.source).unparsed_content
319 assert isinstance(original_yaml, str)
320 stream = StringIO()
322 save_bioimageio_yaml_only(
323 self.updated,
324 stream,
325 exclude_unset=self.exclude_unset,
326 exclude_defaults=self.exclude_defaults,
327 )
328 updated_yaml = stream.getvalue()
330 diff = compare(
331 original_yaml.split("\n"),
332 updated_yaml.split("\n"),
333 diff_format=(
334 "html"
335 if isinstance(self.diff, Path) and self.diff.suffix == ".html"
336 else "unified"
337 ),
338 )
340 if isinstance(self.diff, Path):
341 _ = self.diff.write_text(diff, encoding="utf-8")
342 elif self.diff:
343 console = rich.console.Console()
344 diff_md = f"## Diff\n\n````````diff\n{diff}\n````````"
345 console.print(rich.markdown.Markdown(diff_md))
347 if isinstance(self.output, Path):
348 _ = self.output.write_text(updated_yaml, encoding="utf-8")
349 logger.info(f"written updated description to {self.output}")
350 elif self.output == "display":
351 updated_md = f"## Updated bioimageio.yaml\n\n```yaml\n{updated_yaml}\n```"
352 rich.console.Console().print(rich.markdown.Markdown(updated_md))
353 elif self.output == "stdout":
354 print(updated_yaml)
355 else:
356 assert_never(self.output)
358 if isinstance(self.updated, InvalidDescr):
359 logger.warning("Update resulted in invalid description")
360 _ = self.updated.validation_summary.display()
363class UpdateFormatCmd(UpdateCmdBase):
364 """Update the metadata format to the latest format version."""
366 exclude_defaults: bool = Field(True, alias="exclude-defaults")
367 """Exclude fields that have the default value (even if set explicitly).
369 Note:
370 The update process sets most unset fields explicitly with their default value.
371 """
373 perform_io_checks: bool = Field(
374 settings.perform_io_checks, alias="perform-io-checks"
375 )
376 """Wether or not to attempt validation that may require file download.
377 If `True` file hash values are added if not present."""
379 @cached_property
380 def updated(self):
381 return update_format(
382 self.source,
383 exclude_defaults=self.exclude_defaults,
384 perform_io_checks=self.perform_io_checks,
385 )
388class UpdateHashesCmd(UpdateCmdBase):
389 """Create a bioimageio.yaml description with updated file hashes."""
391 @cached_property
392 def updated(self):
393 return update_hashes(self.source)
396class PredictCmd(CmdBase, WithSource):
397 """Run inference on your data with a bioimage.io model."""
399 inputs: NotEmpty[Sequence[Union[str, NotEmpty[Tuple[str, ...]]]]] = (
400 "{input_id}/001.tif",
401 )
402 """Model input sample paths (for each input tensor)
404 The input paths are expected to have shape...
405 - (n_samples,) or (n_samples,1) for models expecting a single input tensor
406 - (n_samples,) containing the substring '{input_id}', or
407 - (n_samples, n_model_inputs) to provide each input tensor path explicitly.
409 All substrings that are replaced by metadata from the model description:
410 - '{model_id}'
411 - '{input_id}'
413 Example inputs to process samples 'a' and 'b'
414 for a model expecting a 'raw' and a 'mask' input tensor:
415 --inputs="[[\\"a_raw.tif\\",\\"a_mask.tif\\"],[\\"b_raw.tif\\",\\"b_mask.tif\\"]]"
416 (Note that JSON double quotes need to be escaped.)
418 Alternatively a `bioimageio-cli.yaml` (or `bioimageio-cli.json`) file
419 may provide the arguments, e.g.:
420 ```yaml
421 inputs:
422 - [a_raw.tif, a_mask.tif]
423 - [b_raw.tif, b_mask.tif]
424 ```
426 `.npy` files and any file extension supported by imageio can be used.
427 Available formats are listed at
428 https://imageio.readthedocs.io/en/stable/formats/index.html#all-formats.
429 Some formats have additional dependencies.
432 """
434 outputs: Union[str, NotEmpty[Tuple[str, ...]]] = (
435 "outputs_{model_id}/{output_id}/{sample_id}.tif"
436 )
437 """Model output path pattern (per output tensor)
439 All substrings that are replaced:
440 - '{model_id}' (from model description)
441 - '{output_id}' (from model description)
442 - '{sample_id}' (extracted from input paths)
445 """
447 overwrite: bool = False
448 """allow overwriting existing output files"""
450 blockwise: bool = False
451 """process inputs blockwise"""
453 stats: Path = Path("dataset_statistics.json")
454 """path to dataset statistics
455 (will be written if it does not exist
456 and the model requires statistical dataset measures)
457 """
459 preview: bool = False
460 """preview which files would be processed
461 and what outputs would be generated."""
463 weight_format: WeightFormatArgAny = Field(
464 "any",
465 alias="weight-format",
466 validation_alias=WEIGHT_FORMAT_ALIASES,
467 )
468 """The weight format to use."""
470 example: bool = False
471 """generate and run an example
473 1. downloads example model inputs
474 2. creates a `{model_id}_example` folder
475 3. writes input arguments to `{model_id}_example/bioimageio-cli.yaml`
476 4. executes a preview dry-run
477 5. executes prediction with example input
480 """
482 def _example(self):
483 model_descr = ensure_description_is_model(self.descr)
484 input_ids = get_member_ids(model_descr.inputs)
485 example_inputs = (
486 model_descr.sample_inputs
487 if isinstance(model_descr, v0_4.ModelDescr)
488 else [ipt.sample_tensor or ipt.test_tensor for ipt in model_descr.inputs]
489 )
490 if not example_inputs:
491 raise ValueError(f"{self.descr_id} does not specify any example inputs.")
493 inputs001: List[str] = []
494 example_path = Path(f"{self.descr_id}_example")
495 example_path.mkdir(exist_ok=True)
497 for t, src in zip(input_ids, example_inputs):
498 reader = get_reader(src)
499 dst = Path(f"{example_path}/{t}/001{reader.suffix}")
500 dst.parent.mkdir(parents=True, exist_ok=True)
501 inputs001.append(dst.as_posix())
502 with dst.open("wb") as f:
503 shutil.copyfileobj(reader, f)
505 inputs = [inputs001]
506 output_pattern = f"{example_path}/outputs/{{output_id}}/{{sample_id}}.tif"
508 bioimageio_cli_path = example_path / YAML_FILE
509 stats_file = "dataset_statistics.json"
510 stats = (example_path / stats_file).as_posix()
511 cli_example_args = dict(
512 inputs=inputs,
513 outputs=output_pattern,
514 stats=stats_file,
515 blockwise=self.blockwise,
516 )
517 assert is_yaml_value(cli_example_args), cli_example_args
518 write_yaml(
519 cli_example_args,
520 bioimageio_cli_path,
521 )
523 yaml_file_content = None
525 # escaped double quotes
526 inputs_json = json.dumps(inputs)
527 inputs_escaped = inputs_json.replace('"', r"\"")
528 source_escaped = self.source.replace('"', r"\"")
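# Assemble the example `bioimageio predict` command; with `escape=True` the
# arguments are quoted and escaped so the printed command can be pasted into a shell.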
530 def get_example_command(preview: bool, escape: bool = False):
531 q: str = '"' if escape else ""
533 return [
534 "bioimageio",
535 "predict",
536 # --no-preview not supported for py=3.8
537 *(["--preview"] if preview else []),
538 "--overwrite",
539 *(["--blockwise"] if self.blockwise else []),
540 f"--stats={q}{stats}{q}",
541 f"--inputs={q}{inputs_escaped if escape else inputs_json}{q}",
542 f"--outputs={q}{output_pattern}{q}",
543 f"{q}{source_escaped if escape else self.source}{q}",
544 ]
546 if Path(YAML_FILE).exists():
547 logger.info(
548 "temporarily removing '{}' to execute example prediction", YAML_FILE
549 )
550 yaml_file_content = Path(YAML_FILE).read_bytes()
551 Path(YAML_FILE).unlink()
553 try:
554 _ = subprocess.run(get_example_command(True), check=True)
555 _ = subprocess.run(get_example_command(False), check=True)
556 finally:
557 if yaml_file_content is not None:
558 _ = Path(YAML_FILE).write_bytes(yaml_file_content)
559 logger.debug("restored '{}'", YAML_FILE)
561 print(
562 "🎉 Sucessfully ran example prediction!\n"
563 + "To predict the example input using the CLI example config file"
564 + f" {example_path/YAML_FILE}, execute `bioimageio predict` from {example_path}:\n"
565 + f"$ cd {str(example_path)}\n"
566 + f'$ bioimageio predict "{source_escaped}"\n\n'
567 + "Alternatively run the following command"
568 + " in the current workind directory, not the example folder:\n$ "
569 + " ".join(get_example_command(False, escape=True))
570 + f"\n(note that a local '{JSON_FILE}' or '{YAML_FILE}' may interfere with this)"
571 )
573 def run(self):
574 if self.example:
575 return self._example()
577 model_descr = ensure_description_is_model(self.descr)
579 input_ids = get_member_ids(model_descr.inputs)
580 output_ids = get_member_ids(model_descr.outputs)
582 minimum_input_ids = tuple(
583 str(ipt.id) if isinstance(ipt, v0_5.InputTensorDescr) else str(ipt.name)
584 for ipt in model_descr.inputs
585 if not isinstance(ipt, v0_5.InputTensorDescr) or not ipt.optional
586 )
587 maximum_input_ids = tuple(
588 str(ipt.id) if isinstance(ipt, v0_5.InputTensorDescr) else str(ipt.name)
589 for ipt in model_descr.inputs
590 )
592 def expand_inputs(i: int, ipt: Union[str, Tuple[str, ...]]) -> Tuple[str, ...]:
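"""Substitute '{model_id}'/'{input_id}' in the path pattern(s) of input sample #i
and validate the number of resulting paths against the model's input tensors."""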
593 if isinstance(ipt, str):
594 ipts = tuple(
595 ipt.format(model_id=self.descr_id, input_id=t) for t in input_ids
596 )
597 else:
598 ipts = tuple(
599 p.format(model_id=self.descr_id, input_id=t)
600 for t, p in zip(input_ids, ipt)
601 )
603 if len(set(ipts)) < len(ipts):
604 if len(minimum_input_ids) == len(maximum_input_ids):
605 n = len(minimum_input_ids)
606 else:
607 n = f"{len(minimum_input_ids)}-{len(maximum_input_ids)}"
609 raise ValueError(
610 f"[input sample #{i}] Include '{{input_id}}' in path pattern or explicitly specify {n} distinct input paths (got {ipt})"
611 )
613 if len(ipts) < len(minimum_input_ids):
614 raise ValueError(
615 f"[input sample #{i}] Expected at least {len(minimum_input_ids)} inputs {minimum_input_ids}, got {ipts}"
616 )
618 if len(ipts) > len(maximum_input_ids):
619 raise ValueError(
620 f"Expected at most {len(maximum_input_ids)} inputs {maximum_input_ids}, got {ipts}"
621 )
623 return ipts
625 inputs = [expand_inputs(i, ipt) for i, ipt in enumerate(self.inputs, start=1)]
627 sample_paths_in = [
628 {t: Path(p) for t, p in zip(input_ids, ipts)} for ipts in inputs
629 ]
631 sample_ids = _get_sample_ids(sample_paths_in)
633 def expand_outputs():
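"""Substitute '{model_id}'/'{output_id}'/'{sample_id}' in the output pattern(s)
and validate the number of resulting paths per sample."""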
634 if isinstance(self.outputs, str):
635 outputs = [
636 tuple(
637 Path(
638 self.outputs.format(
639 model_id=self.descr_id, output_id=t, sample_id=s
640 )
641 )
642 for t in output_ids
643 )
644 for s in sample_ids
645 ]
646 else:
647 outputs = [
648 tuple(
649 Path(p.format(model_id=self.descr_id, output_id=t, sample_id=s))
650 for t, p in zip(output_ids, self.outputs)
651 )
652 for s in sample_ids
653 ]
655 for i, out in enumerate(outputs, start=1):
656 if len(set(out)) < len(out):
657 raise ValueError(
658 f"[output sample #{i}] Include '{{output_id}}' in path pattern or explicitly specify {len(output_ids)} distinct output paths (got {out})"
659 )
661 if len(out) != len(output_ids):
662 raise ValueError(
663 f"[output sample #{i}] Expected {len(output_ids)} outputs {output_ids}, got {out}"
664 )
666 return outputs
668 outputs = expand_outputs()
670 sample_paths_out = [
671 {MemberId(t): Path(p) for t, p in zip(output_ids, out)} for out in outputs
672 ]
674 if not self.overwrite:
675 for sample_paths in sample_paths_out:
676 for p in sample_paths.values():
677 if p.exists():
678 raise FileExistsError(
679 f"{p} already exists. use --overwrite to (re-)write outputs anyway."
680 )
681 if self.preview:
682 print("🛈 bioimageio prediction preview structure:")
683 pprint(
684 {
685 "{sample_id}": dict(
686 inputs={"{input_id}": "<input path>"},
687 outputs={"{output_id}": "<output path>"},
688 )
689 }
690 )
691 print("🔎 bioimageio prediction preview output:")
692 pprint(
693 {
694 s: dict(
695 inputs={t: p.as_posix() for t, p in sp_in.items()},
696 outputs={t: p.as_posix() for t, p in sp_out.items()},
697 )
698 for s, sp_in, sp_out in zip(
699 sample_ids, sample_paths_in, sample_paths_out
700 )
701 }
702 )
703 return
705 def input_dataset(stat: Stat):
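"""Lazily yield one `Sample` per entry in `sample_paths_in`, loaded with the given statistics."""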
706 for s, sp_in in zip(sample_ids, sample_paths_in):
707 yield load_sample_for_model(
708 model=model_descr,
709 paths=sp_in,
710 stat=stat,
711 sample_id=s,
712 )
714 stat: Dict[Measure, MeasureValue] = dict(
715 _get_stat(
716 model_descr, input_dataset({}), len(sample_ids), self.stats
717 ).items()
718 )
720 pp = create_prediction_pipeline(
721 model_descr,
722 weight_format=None if self.weight_format == "any" else self.weight_format,
723 )
724 predict_method = (
725 pp.predict_sample_with_blocking
726 if self.blockwise
727 else pp.predict_sample_without_blocking
728 )
730 for sample_in, sp_out in tqdm(
731 zip(input_dataset(dict(stat)), sample_paths_out),
732 total=len(inputs),
733 desc=f"predict with {self.descr_id}",
734 unit="sample",
735 ):
736 sample_out = predict_method(sample_in)
737 save_sample(sp_out, sample_out)
740class AddWeightsCmd(CmdBase, WithSource, WithSummaryLogging):
741 output: CliPositionalArg[Path]
742 """The path to write the updated model package to."""
744 source_format: Optional[SupportedWeightsFormat] = Field(None, alias="source-format")
745 """Exclusively use these weights to convert to other formats."""
747 target_format: Optional[SupportedWeightsFormat] = Field(None, alias="target-format")
748 """Exclusively add this weight format."""
750 verbose: bool = False
751 """Log more (error) output."""
753 def run(self):
754 model_descr = ensure_description_is_model(self.descr)
755 if isinstance(model_descr, v0_4.ModelDescr):
756 raise TypeError(
757 f"model format {model_descr.format_version} not supported."
758 + " Please update the model first."
759 )
760 updated_model_descr = add_weights(
761 model_descr,
762 output_path=self.output,
763 source_format=self.source_format,
764 target_format=self.target_format,
765 verbose=self.verbose,
766 )
767 if updated_model_descr is None:
768 return
770 self.log(updated_model_descr)
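# Config file names picked up as additional CLI argument sources (see `Bioimageio.model_config`).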
773JSON_FILE = "bioimageio-cli.json"
774YAML_FILE = "bioimageio-cli.yaml"
777class Bioimageio(
778 BaseSettings,
779 cli_implicit_flags=True,
780 cli_parse_args=True,
781 cli_prog_name="bioimageio",
782 cli_use_class_docs_for_groups=True,
783 use_attribute_docstrings=True,
784):
785 """bioimageio - CLI for bioimage.io resources 🦒"""
787 model_config = SettingsConfigDict(
788 json_file=JSON_FILE,
789 yaml_file=YAML_FILE,
790 )
792 validate_format: CliSubCommand[ValidateFormatCmd] = Field(alias="validate-format")
793 "Check a resource's metadata format"
795 test: CliSubCommand[TestCmd]
796 "Test a bioimageio resource (beyond meta data formatting)"
798 package: CliSubCommand[PackageCmd]
799 "Package a resource"
801 predict: CliSubCommand[PredictCmd]
802 "Predict with a model resource"
804 update_format: CliSubCommand[UpdateFormatCmd] = Field(alias="update-format")
805 """Update the metadata format"""
807 update_hashes: CliSubCommand[UpdateHashesCmd] = Field(alias="update-hashes")
808 """Create a bioimageio.yaml description with updated file hashes."""
810 add_weights: CliSubCommand[AddWeightsCmd] = Field(alias="add-weights")
811 """Add additional weights to the model descriptions converted from available
812 formats to improve deployability."""
814 @classmethod
815 def settings_customise_sources(
816 cls,
817 settings_cls: Type[BaseSettings],
818 init_settings: PydanticBaseSettingsSource,
819 env_settings: PydanticBaseSettingsSource,
820 dotenv_settings: PydanticBaseSettingsSource,
821 file_secret_settings: PydanticBaseSettingsSource,
822 ) -> Tuple[PydanticBaseSettingsSource, ...]:
823 cli: CliSettingsSource[BaseSettings] = CliSettingsSource(
824 settings_cls,
825 cli_parse_args=True,
826 formatter_class=RawTextHelpFormatter,
827 )
828 sys_args = pformat(sys.argv)
829 logger.info("starting CLI with arguments:\n{}", sys_args)
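# Sources returned first take precedence: CLI args, then init settings, then the YAML and JSON config files.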
830 return (
831 cli,
832 init_settings,
833 YamlConfigSettingsSource(settings_cls),
834 JsonConfigSettingsSource(settings_cls),
835 )
837 @model_validator(mode="before")
838 @classmethod
839 def _log(cls, data: Any):
840 logger.info(
841 "loaded CLI input:\n{}",
842 pformat({k: v for k, v in data.items() if v is not None}),
843 )
844 return data
846 def run(self):
847 logger.info(
848 "executing CLI command:\n{}",
849 pformat({k: v for k, v in self.model_dump().items() if v is not None}),
850 )
851 cmd = (
852 self.add_weights
853 or self.package
854 or self.predict
855 or self.test
856 or self.update_format
857 or self.update_hashes
858 or self.validate_format
859 )
860 assert cmd is not None
861 cmd.run()
864assert isinstance(Bioimageio.__doc__, str)
865Bioimageio.__doc__ += f"""
867library versions:
868 bioimageio.core {VERSION}
869 bioimageio.spec {bioimageio.spec.__version__}
871spec format versions:
872 model RDF {ModelDescr.implemented_format_version}
873 dataset RDF {DatasetDescr.implemented_format_version}
874 notebook RDF {NotebookDescr.implemented_format_version}
876"""
879def _get_sample_ids(
880 input_paths: Sequence[Mapping[MemberId, Path]],
881) -> Sequence[SampleId]:
882 """Get sample ids for given input paths, based on the common path per sample.
884 Falls back to sample01, sample02, etc."""
886 matcher = SequenceMatcher()
888 def get_common_seq(seqs: Sequence[Sequence[str]]) -> Sequence[str]:
889 """extract a common sequence from multiple sequences
890 (order sensitive; strips whitespace and slashes)
891 """
892 common = seqs[0]
894 for seq in seqs[1:]:
895 if not seq:
896 continue
897 matcher.set_seqs(common, seq)
898 i, _, size = matcher.find_longest_match()
899 common = common[i : i + size]
901 if isinstance(common, str):
902 common = common.strip().strip("/")
903 else:
904 common = [cs for c in common if (cs := c.strip().strip("/"))]
906 if not common:
907 raise ValueError(f"failed to find common sequence for {seqs}")
909 return common
911 def get_shorter_diff(seqs: Sequence[Sequence[str]]) -> List[Sequence[str]]:
912 """get a shorter sequence whose entries are still unique
913 (order sensitive, not minimal sequence)
914 """
915 min_seq_len = min(len(s) for s in seqs)
916 # cut from the start
917 for start in range(min_seq_len - 1, -1, -1):
918 shortened = [s[start:] for s in seqs]
919 if len(set(shortened)) == len(seqs):
920 min_seq_len -= start
921 break
922 else:
923 seen: Set[Sequence[str]] = set()
924 dupes = [s for s in seqs if s in seen or seen.add(s)]
925 raise ValueError(f"Found duplicate entries {dupes}")
927 # cut from the end
928 for end in range(min_seq_len - 1, 1, -1):
929 shortened = [s[:end] for s in shortened]
930 if len(set(shortened)) == len(seqs):
931 break
933 return shortened
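# Derive sample ids: take the common part of each sample's input paths (without suffixes),
# then shorten across samples while keeping the ids unique.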
935 full_tensor_ids = [
936 sorted(
937 p.resolve().with_suffix("").as_posix() for p in input_sample_paths.values()
938 )
939 for input_sample_paths in input_paths
940 ]
941 try:
942 long_sample_ids = [get_common_seq(t) for t in full_tensor_ids]
943 sample_ids = get_shorter_diff(long_sample_ids)
944 except ValueError as e:
945 raise ValueError(f"failed to extract sample ids: {e}") from e
947 return sample_ids