infer_tiles

Run inference using a model and transform definition (either local or using torch.hub)

Decorates existing slide_tiles with additional columns corresponding to class prediction/scores from the model
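
For orientation, here is a minimal sketch of a direct Python invocation using the signature documented below. It is a sketch, not canonical usage: the slide path, repository, and model name are hypothetical placeholders.

```python
from luna.pathology.cli.infer_tile_labels import cli

# All paths and model identifiers below are illustrative placeholders.
result = cli(
    slide_urlpath="data/slides/example.svs",     # hypothetical local slide
    tile_size=256,
    torch_model_repo_or_dir="msk-mind/luna-ml",  # github namespace/repo served via torch.hub
    model_name="my_tile_classifier",             # hypothetical nn.Module name in that repo
    output_urlpath="results/",
    batch_size=64,
    use_gpu=True,
)
print(result)  # dict of metadata describing the generated outputs
```

Because the function is exposed through Python Fire (note the fire.core.FireError checks in the source), the same keyword arguments map onto command-line flags.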

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `slide_urlpath` | `str` | URL/path to the slide image (virtual slide formats compatible with TiffSlide: .svs, .tif, .scn, ...) | `''` |
| `tiles_urlpath` | `str` | path to a slide-tile manifest file (.tiles.csv) | `''` |
| `tile_size` | `Optional[int]` | size of tiles to use (at the requested magnification) | `None` |
| `filter_query` | `str` | pandas query used to filter tiles based on their tissue detection scores (see the example after this table) | `''` |
| `requested_magnification` | `Optional[int]` | magnification scale at which to perform computation | `None` |
| `torch_model_repo_or_dir` | `str` | repository root name (namespace/repo) on github.com from which to serve torch.hub models (e.g. msk-mind/luna-ml), or path to a local model directory | `'???'` |
| `model_name` | `str` | torch.hub model name (an nn.Module exposed by the repository) | `'???'` |
| `num_cores` | `int` | number of cores to use for CPU parallelization | `4` |
| `batch_size` | `int` | batch size used to chunk inference (8-256 recommended, depending on memory usage) | `8` |
| `output_urlpath` | `str` | output/working directory | `'.'` |
| `force` | `bool` | overwrite outputs if they exist | `False` |
| `kwargs` | `dict` | additional keywords to pass to model initialization | `{}` |
| `use_gpu` | `bool` | use GPU if available | `False` |
| `dask_options` | `dict` | options to pass to the Dask client | `{}` |
| `insecure` | `bool` | allow insecure SSL connections | `False` |
| `storage_options` | `dict` | storage options to pass to reading functions | `{}` |
| `output_storage_options` | `dict` | storage options to pass to writing functions | `{}` |
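
Because `filter_query` is evaluated with pandas' `DataFrame.query` syntax against the tile manifest, any boolean expression over the manifest's columns works. A small illustration follows; the score column names are assumptions, not guaranteed outputs of tissue detection:

```python
import pandas as pd

# Toy tile manifest with hypothetical tissue-detection score columns.
tiles = pd.DataFrame(
    {
        "address": ["x1_y1", "x1_y2", "x2_y1"],
        "otsu_score": [0.9, 0.2, 0.7],
        "purple_score": [0.8, 0.1, 0.3],
    }
)

# The same string you would pass as filter_query:
print(tiles.query("otsu_score > 0.5 and purple_score > 0.25"))
# keeps tiles x1_y1 and x2_y1
```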

Returns:

| Type | Description |
| --- | --- |
| `dict` | metadata |

Source code in src/luna/pathology/cli/infer_tile_labels.py
```python
@timed
@save_metadata
def cli(
    slide_urlpath: str = "",
    tiles_urlpath: str = "",
    tile_size: Optional[int] = None,
    filter_query: str = "",
    requested_magnification: Optional[int] = None,
    torch_model_repo_or_dir: str = "???",
    model_name: str = "???",
    num_cores: int = 4,
    batch_size: int = 8,
    output_urlpath: str = ".",
    force: bool = False,
    kwargs: dict = {},
    use_gpu: bool = False,
    dask_options: dict = {},
    insecure: bool = False,
    storage_options: dict = {},
    output_storage_options: dict = {},
):
    """Run inference using a model and transform definition (either local or using torch.hub)

    Decorates existing slide_tiles with additional columns corresponding to class prediction/scores from the model

    Args:
        slide_urlpath (str): url/path to slide image (virtual slide formats compatible with TiffSlide, .svs, .tif, .scn, ...)
        tiles_urlpath (str): path to a slide-tile manifest file (.tiles.csv)
        tile_size (Optional[int]): size of tiles to use (at the requested magnification)
        filter_query (str): pandas query by which to filter tiles based on their various tissue detection scores
        requested_magnification (Optional[int]): Magnification scale at which to perform computation
        torch_model_repo_or_dir (str): repository root name (namespace/repo) on github.com from which to serve torch.hub models (e.g. msk-mind/luna-ml), or path to a local model directory
        model_name (str): torch.hub model name (an nn.Module exposed by the repository)
        num_cores (int): Number of cores to use for CPU parallelization
        batch_size (int): batch size used to chunk inference (8-256 recommended, depending on memory usage)
        output_urlpath (str): output/working directory
        force (bool): overwrite outputs if they exist
        kwargs (dict): additional keywords to pass to model initialization
        use_gpu (bool): use GPU if available
        dask_options (dict): options to pass to dask client
        insecure (bool): allow insecure SSL connections
        storage_options (dict): storage options to pass to reading functions
        output_storage_options (dict): storage options to pass to writing functions

    Returns:
        dict: metadata
    """
    config = get_config(vars())
    configure_dask_client(**config["dask_options"])

    if not config["slide_urlpath"] and not config["tiles_urlpath"]:
        raise fire.core.FireError("Specify either tiles_urlpath or slide_urlpath")

    if not config["tile_size"] and not config["tiles_urlpath"]:
        raise fire.core.FireError("Specify either tiles_urlpath or tile_size")

    if config["slide_urlpath"]:
        slide_id = Path(config["slide_urlpath"]).stem
    else:
        slide_id = Path(config["tiles_urlpath"]).stem.removesuffix(".tiles")

    tiles_urlpath = config["tiles_urlpath"]
    with make_temp_directory() as temp_dir:
        if not tiles_urlpath:
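            # No precomputed tile manifest: build one from the slide first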
            tiles_result = __generate_tiles(
                config["slide_urlpath"],
                config["tile_size"],
                (Path(temp_dir) / "generate_tiles").as_uri(),
                config["force"],
                config["tile_magnification"],
                config["storage_options"],
            )
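            # Score tiles for tissue content so filter_query can drop background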
            detect_tissue_result = __detect_tissue(
                config["slide_urlpath"],
                tiles_result["tiles_url"],
                slide_id,
                config["thumbnail_magnification"],
                config["filter_query"],
                config["batch_size"],
                (Path(temp_dir) / "detect_tissue").as_uri(),
                config["force"],
                config["storage_options"],
            )
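            # Materialize the filtered tiles to storage for batched inference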
            save_tiles_result = _save_tiles(
                detect_tissue_result["tiles_urlpath"],
                config["slide_urlpath"],
                (Path(temp_dir) / "save_tiles").as_uri(),
                config["force"],
                config["batch_size"],
                config["storage_options"],
            )
            tiles_urlpath = save_tiles_result["tiles_url"]

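        # Decorate the tile manifest with the model's prediction/score columns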
        return __infer_tile_labels(
            tiles_urlpath,
            slide_id,
            config["output_urlpath"],
            config["force"],
            config["torch_model_repo_or_dir"],
            config["model_name"],
            config["num_cores"],
            config["batch_size"],
            config["kwargs"],
            config["use_gpu"],
            config["insecure"],
            config["storage_options"],
            config["output_storage_options"],
        )
```
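
A hedged sketch of consuming the result follows. The metadata key and on-disk format used here are assumptions and may differ across Luna versions:

```python
import pandas as pd
from luna.pathology.cli.infer_tile_labels import cli

result = cli(
    tiles_urlpath="results/example.tiles.csv",   # hypothetical precomputed manifest
    torch_model_repo_or_dir="msk-mind/luna-ml",  # illustrative torch.hub repo
    model_name="my_tile_classifier",             # hypothetical model name
    output_urlpath="results/",
)

# Assumption: the metadata exposes the decorated manifest's location under a
# key such as "tiles_url"; check the actual keys returned by your version.
scored = pd.read_parquet(result["tiles_url"])
print(scored.columns.tolist())  # tile columns plus per-class prediction/scores
```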