Skip to content

torchscript_backend ¤

Classes:

Name Description
TorchscriptModelAdapter

TorchscriptModelAdapter ¤

TorchscriptModelAdapter(*, model_description: Union[v0_4.ModelDescr, v0_5.ModelDescr], devices: Optional[Sequence[str]] = None)

Bases: ModelAdapter


              flowchart TD
              bioimageio.core.backends.torchscript_backend.TorchscriptModelAdapter[TorchscriptModelAdapter]
              bioimageio.core.backends._model_adapter.ModelAdapter[ModelAdapter]

                              bioimageio.core.backends._model_adapter.ModelAdapter --> bioimageio.core.backends.torchscript_backend.TorchscriptModelAdapter
                


              click bioimageio.core.backends.torchscript_backend.TorchscriptModelAdapter href "" "bioimageio.core.backends.torchscript_backend.TorchscriptModelAdapter"
              click bioimageio.core.backends._model_adapter.ModelAdapter href "" "bioimageio.core.backends._model_adapter.ModelAdapter"
            

Methods:

Name Description
create

Creates model adapter based on the passed spec

forward

Run forward pass of model to get model predictions

load
unload

Unload model from any devices, freeing their memory.

Attributes:

Name Type Description
devices
Source code in src/bioimageio/core/backends/torchscript_backend.py
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
def __init__(
    self,
    *,
    model_description: Union[v0_4.ModelDescr, v0_5.ModelDescr],
    devices: Optional[Sequence[str]] = None,
):
    """Load the model description's torchscript weights onto a device.

    Args:
        model_description: model description; must have `weights.torchscript`.
        devices: devices to load the model on. Defaults to a single device:
            "cuda" if available, otherwise "cpu". Only the first device is
            used; multiple devices are not yet supported.

    Raises:
        ValueError: if the model description has no torchscript weights.
    """
    super().__init__(model_description=model_description)
    if model_description.weights.torchscript is None:
        raise ValueError(
            f"No torchscript weights found for model {model_description.name}"
        )

    # Normalize both branches to torch.device objects. Previously the
    # default branch stored plain strings while the explicit branch stored
    # torch.device instances, so the element type of `self.devices`
    # depended on how the adapter was constructed.
    if devices is None:
        self.devices = [
            torch.device("cuda" if torch.cuda.is_available() else "cpu")
        ]
    else:
        self.devices = [torch.device(d) for d in devices]

    if len(self.devices) > 1:
        warnings.warn(
            "Multiple devices for single torchscript model not yet implemented"
        )

    weight_reader = model_description.weights.torchscript.get_reader()
    self._model = torch.jit.load(weight_reader)

    # Move to the (single) target device and switch to inference mode.
    self._model.to(self.devices[0])
    self._model = self._model.eval()

devices instance-attribute ¤

devices = ['cuda' if torch.cuda.is_available() else 'cpu']

create classmethod ¤

create(model_description: Union[v0_4.ModelDescr, v0_5.ModelDescr], *, devices: Optional[Sequence[str]] = None, weight_format_priority_order: Optional[Sequence[SupportedWeightsFormat]] = None)

Creates a model adapter based on the passed spec. Note: all adapter instantiations should happen inside this function to prevent different framework initializations from interfering with each other.

Source code in src/bioimageio/core/backends/_model_adapter.py
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
@final
@classmethod
def create(
    cls,
    model_description: Union[v0_4.ModelDescr, v0_5.ModelDescr],
    *,
    devices: Optional[Sequence[str]] = None,
    weight_format_priority_order: Optional[Sequence[SupportedWeightsFormat]] = None,
):
    """
    Creates model adapter based on the passed spec
    Note: All specific adapters should happen inside this function to prevent different framework
    initializations interfering with each other
    """
    if not isinstance(model_description, (v0_4.ModelDescr, v0_5.ModelDescr)):
        raise TypeError(
            f"expected v0_4.ModelDescr or v0_5.ModelDescr, but got {type(model_description)}"
        )

    weights = model_description.weights
    if weight_format_priority_order is None:
        weight_format_priority_order = DEFAULT_WEIGHT_FORMAT_PRIORITY_ORDER

    # restrict the priority order to weight formats actually present
    weight_format_priority_order_present: Sequence[SupportedWeightsFormat] = [
        wf for wf in weight_format_priority_order if getattr(weights, wf) is not None
    ]
    if not weight_format_priority_order_present:
        raise ValueError(
            f"None of the specified weight formats ({weight_format_priority_order}) is present ({weight_format_priority_order_present})"
        )

    # One factory per weight format; imports stay inside the factories so a
    # missing/broken framework only affects that format's attempt.
    def _from_pytorch_state_dict():
        from .pytorch_backend import PytorchModelAdapter

        return PytorchModelAdapter(
            model_description=model_description, devices=devices
        )

    def _from_tensorflow_saved_model_bundle():
        from .tensorflow_backend import create_tf_model_adapter

        return create_tf_model_adapter(
            model_description=model_description, devices=devices
        )

    def _from_onnx():
        from .onnx_backend import ONNXModelAdapter

        return ONNXModelAdapter(
            model_description=model_description, devices=devices
        )

    def _from_torchscript():
        from .torchscript_backend import TorchscriptModelAdapter

        return TorchscriptModelAdapter(
            model_description=model_description, devices=devices
        )

    def _from_keras_hdf5():
        # keras can either be installed as a separate package or used as part
        # of tensorflow; prefer the separate package, fall back to tf's keras
        try:
            from .keras_backend import KerasModelAdapter
        except Exception:
            from .tensorflow_backend import KerasModelAdapter

        return KerasModelAdapter(
            model_description=model_description, devices=devices
        )

    factories = {
        "pytorch_state_dict": _from_pytorch_state_dict,
        "tensorflow_saved_model_bundle": _from_tensorflow_saved_model_bundle,
        "onnx": _from_onnx,
        "torchscript": _from_torchscript,
        "keras_hdf5": _from_keras_hdf5,
    }

    errors: List[Exception] = []
    for wf in weight_format_priority_order_present:
        assert getattr(weights, wf) is not None
        factory = factories.get(wf)
        if factory is None:
            assert_never(wf)
        try:
            return factory()
        except Exception as e:
            errors.append(e)

    assert errors
    if len(weight_format_priority_order) == 1:
        assert len(errors) == 1
        raise errors[0]

    msg = (
        "None of the weight format specific model adapters could be created"
        + " in this environment."
    )
    raise ExceptionGroup(msg, errors)

forward ¤

forward(input_sample: Union[Sample, SampleBlock, SampleBlockWithOrigin]) -> Sample

Run forward pass of model to get model predictions

Note: sample id and sample stat attributes are passed through

Source code in src/bioimageio/core/backends/_model_adapter.py
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
def forward(
    self, input_sample: Union[Sample, SampleBlock, SampleBlockWithOrigin]
) -> Sample:
    """
    Run forward pass of model to get model predictions

    Note: sample id and sample stat attributes are passed through
    """
    # warn about members the model has no input slot for
    unexpected = [m for m in input_sample.members if m not in self._input_ids]
    if unexpected:
        warnings.warn(f"Got unexpected input tensor IDs: {unexpected}")

    # collect raw arrays in the backend's expected order, each transposed
    # into its declared axis order; absent members become None
    raw_inputs = []
    for tensor_id, axis_order in zip(self._input_ids, self._input_axes):
        member = input_sample.members.get(tensor_id)
        if member is None:
            raw_inputs.append(None)
        else:
            raw_inputs.append(member.transpose(axis_order).data.data)

    raw_outputs = self._forward_impl(raw_inputs)
    assert len(raw_outputs) <= len(self._output_ids)

    # wrap non-None outputs as tensors, keyed by their output id
    members = {}
    for tensor_id, axis_order, raw in zip(
        self._output_ids, self._output_axes, raw_outputs
    ):
        if raw is not None:
            members[tensor_id] = Tensor(raw, dims=axis_order)

    if isinstance(input_sample, Sample):
        sample_id = input_sample.id
    else:
        sample_id = input_sample.sample_id

    return Sample(members=members, stat=input_sample.stat, id=sample_id)

load ¤

load(*, devices: Optional[Sequence[str]] = None) -> None
Source code in src/bioimageio/core/backends/_model_adapter.py
178
179
180
@final
def load(self, *, devices: Optional[Sequence[str]] = None) -> None:
    """Deprecated no-op kept for backward compatibility.

    The model is already loaded when the adapter is constructed, so this
    only emits a deprecation warning; *devices* is ignored.
    """
    _ = devices  # accepted only for backward compatibility
    warnings.warn("Deprecated. ModelAdapter is loaded on initialization")

unload ¤

unload() -> None

Unload model from any devices, freeing their memory. The model adapter should be considered unusable afterwards.

Source code in src/bioimageio/core/backends/torchscript_backend.py
69
70
71
72
73
def unload(self) -> None:
    """Unload model from any devices, freeing their memory.

    The model adapter should be considered unusable afterwards.
    """
    # Fix: previously assigned `self._devices = None`, but the adapter's
    # attribute (set in __init__) is `self.devices`, so that assignment was
    # a dead write and the device list was never cleared.
    self.devices = None
    del self._model
    _ = gc.collect()  # deallocate memory
    torch.cuda.empty_cache()  # release reserved memory