
Lime

Bases: Explainer

Lime explainer.

Supported Modules: Linear, Convolution, LSTM, RNN, Attention

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `model` | `Module` | The PyTorch model for which attribution is to be computed. | *required* |
| `n_samples` | `int` | Number of perturbed samples used to train the interpretable surrogate model. | `25` |
| `baseline_fn` | `Union[BaselineMethodOrFunction, Tuple[BaselineMethodOrFunction]]` | The baseline function, accepting the attribution input and returning the baseline accordingly. | `'zeros'` |
| `feature_mask_fn` | `Union[FeatureMaskMethodOrFunction, Tuple[FeatureMaskMethodOrFunction]]` | The feature mask function, accepting the attribution input and returning the feature mask accordingly. | `'felzenszwalb'` |
| `perturb_fn` | `Optional[Callable[[Tensor], Tensor]]` | The perturbation function, accepting the attribution input and returning the perturbed value. | `None` |
| `forward_arg_extractor` | `Optional[Callable[[Tuple[Tensor]], Union[Tensor, Tuple[Tensor]]]]` | A function that extracts the forward arguments from the input batch(es) to which the attribution scores are assigned. | `None` |
| `additional_forward_arg_extractor` | `Optional[Callable[[Tuple[Tensor]], Tuple[Tensor]]]` | A secondary function that extracts additional forward arguments from the input batch(es). | `None` |
| `**kwargs` | | Keyword arguments forwarded to the base `Explainer` implementation. | *required* |
Reference

Marco Tulio Ribeiro, Sameer Singh, Carlos Guestrin. "Why Should I Trust You?": Explaining the Predictions of Any Classifier.
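
A minimal usage sketch is shown below. The import path `pnpxai.explainers.Lime` and the torchvision classifier are illustrative assumptions, not prescribed by this page; the default `'zeros'` baseline and `'felzenszwalb'` feature mask target image inputs.

```python
import torch
from torchvision.models import resnet18

# Assumed import path; adjust to where Lime is exposed in your installation.
from pnpxai.explainers import Lime

model = resnet18(weights=None).eval()  # any image classifier works here

explainer = Lime(
    model,
    n_samples=25,                    # perturbed samples per input
    baseline_fn='zeros',             # masked features are replaced by zeros
    feature_mask_fn='felzenszwalb',  # superpixel segmentation for images
)

inputs = torch.randn(2, 3, 224, 224)          # dummy image batch
targets = torch.tensor([207, 281])            # class indices to explain

attrs = explainer.attribute(inputs, targets)  # attribution per input element
print(attrs.shape)
```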

Source code in pnpxai/explainers/lime.py
class Lime(Explainer):
    """
    Lime explainer.

    Supported Modules: `Linear`, `Convolution`, `LSTM`, `RNN`, `Attention`

    Parameters:
        model (Module): The PyTorch model for which attribution is to be computed.
        n_samples (int): Number of perturbed samples used to train the interpretable surrogate model.
        baseline_fn (Union[BaselineMethodOrFunction, Tuple[BaselineMethodOrFunction]]): The baseline function, accepting the attribution input and returning the baseline accordingly.
        feature_mask_fn (Union[FeatureMaskMethodOrFunction, Tuple[FeatureMaskMethodOrFunction]]): The feature mask function, accepting the attribution input and returning the feature mask accordingly.
        perturb_fn (Optional[Callable[[Tensor], Tensor]]): The perturbation function, accepting the attribution input and returning the perturbed value.
        forward_arg_extractor: A function that extracts the forward arguments from the input batch(es) to which the attribution scores are assigned.
        additional_forward_arg_extractor: A secondary function that extracts additional forward arguments from the input batch(es).
        **kwargs: Keyword arguments forwarded to the base Explainer implementation.

    Reference:
        Marco Tulio Ribeiro, Sameer Singh, Carlos Guestrin. "Why Should I Trust You?": Explaining the Predictions of Any Classifier.
    """

    SUPPORTED_MODULES = [Linear, Convolution, LSTM, RNN, Attention]

    def __init__(
        self,
        model: Module,
        n_samples: int = 25,
        baseline_fn: Union[BaselineMethodOrFunction,
                           Tuple[BaselineMethodOrFunction]] = 'zeros',
        feature_mask_fn: Union[FeatureMaskMethodOrFunction,
                               Tuple[FeatureMaskMethodOrFunction]] = 'felzenszwalb',
        perturb_fn: Optional[Callable[[Tensor], Tensor]] = None,
        forward_arg_extractor: Optional[Callable[[
            Tuple[Tensor]], Union[Tensor, Tuple[Tensor]]]] = None,
        additional_forward_arg_extractor: Optional[Callable[[
            Tuple[Tensor]], Tuple[Tensor]]] = None,
    ) -> None:
        super().__init__(model, forward_arg_extractor, additional_forward_arg_extractor)
        self.baseline_fn = baseline_fn or torch.zeros_like
        self.feature_mask_fn = feature_mask_fn
        self.perturb_fn = perturb_fn
        self.n_samples = n_samples

    def attribute(
            self,
            inputs: Tensor,
            targets: Optional[Tensor] = None,
    ) -> Union[Tensor, Tuple[Tensor]]:
        """
        Computes attributions for the given inputs and targets.

        Args:
            inputs (torch.Tensor): The input data.
            targets (torch.Tensor): The target labels for the inputs.

        Returns:
            Union[torch.Tensor, Tuple[torch.Tensor]]: The result of the explanation.
        """
        forward_args, additional_forward_args = self._extract_forward_args(
            inputs)
        forward_args = format_into_tuple(forward_args)

        explainer = CaptumLime(self.model, perturb_func=self.perturb_fn)
        attrs = explainer.attribute(
            inputs=forward_args,
            target=targets,
            baselines=self._get_baselines(forward_args),
            feature_mask=self._get_feature_masks(forward_args),
            n_samples=self.n_samples,
            additional_forward_args=additional_forward_args,
        )
        if isinstance(attrs, tuple) and len(attrs) == 1:
            attrs = attrs[0]
        return attrs


    def get_tunables(self) -> Dict[str, Tuple[type, Dict]]:
        """
        Provides tunable parameters for the optimizer.

        Tunable parameters:
            `n_samples` (int): Value can be selected in the range of `range(10, 100, 10)`

            `baseline_fn` (callable): BaselineFunction selects suitable values in accordance with the modality

            `feature_mask_fn` (callable): FeatureMaskFunction selects suitable values in accordance with the modality
        """
        return {
            'n_samples': (int, {'low': 10, 'high': 100, 'step': 10}),
            'baseline_fn': (BaselineFunction, {}),
            'feature_mask_fn': (FeatureMaskFunction, {})
        }
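
Beyond the built-in string methods, `baseline_fn` can also be passed as a callable that receives the attribution input and returns a baseline of the same shape, as described in the parameter table and implied by the `torch.zeros_like` fallback in `__init__`. A hedged sketch with a per-channel mean baseline (an illustrative choice, not part of the API) follows.

```python
import torch

def mean_baseline(x: torch.Tensor) -> torch.Tensor:
    # Replace masked features with the per-channel mean of each image
    # instead of zeros; the returned tensor matches the input shape.
    return torch.ones_like(x) * x.mean(dim=(-2, -1), keepdim=True)

explainer = Lime(
    model,                      # model from the earlier sketch
    baseline_fn=mean_baseline,  # callable baseline instead of 'zeros'
)
```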

attribute(inputs, targets=None)

Computes attributions for the given inputs and targets.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `inputs` | `Tensor` | The input data. | *required* |
| `targets` | `Tensor` | The target labels for the inputs. | `None` |

Returns:

| Type | Description |
| --- | --- |
| `Union[Tensor, Tuple[Tensor]]` | The result of the explanation. |

Source code in pnpxai/explainers/lime.py
def attribute(
        self,
        inputs: Tensor,
        targets: Optional[Tensor] = None,
) -> Union[Tensor, Tuple[Tensor]]:
    """
    Computes attributions for the given inputs and targets.

    Args:
        inputs (torch.Tensor): The input data.
        targets (torch.Tensor): The target labels for the inputs.

    Returns:
        Union[torch.Tensor, Tuple[torch.Tensor]]: The result of the explanation.
    """
    forward_args, additional_forward_args = self._extract_forward_args(
        inputs)
    forward_args = format_into_tuple(forward_args)

    explainer = CaptumLime(self.model, perturb_func=self.perturb_fn)
    attrs = explainer.attribute(
        inputs=forward_args,
        target=targets,
        baselines=self._get_baselines(forward_args),
        feature_mask=self._get_feature_masks(forward_args),
        n_samples=self.n_samples,
        additional_forward_args=additional_forward_args,
    )
    if isinstance(attrs, tuple) and len(attrs) == 1:
        attrs = attrs[0]
    return attrs
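
A short sketch of explaining the model's own predictions, reusing the `explainer` and `inputs` from the earlier example; the heatmap reduction at the end is an illustrative convention for image inputs, not part of the API.

```python
# Use the predicted classes as targets.
with torch.no_grad():
    logits = explainer.model(inputs)
targets = logits.argmax(dim=-1)

attrs = explainer.attribute(inputs=inputs, targets=targets)

# For a single image input the attributions share the input shape, so a
# per-pixel heatmap can be obtained by reducing over the channel dimension.
heatmap = attrs.sum(dim=1)
print(heatmap.shape)  # (batch, height, width)
```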

get_tunables()

Provides tunable parameters for the optimizer.

Tunable parameters

n_samples (int): Value can be selected in the range of range(10, 100, 10)

baseline_fn (callable): BaselineFunction selects suitable values in accordance with the modality

feature_mask_fn (callable): FeatureMaskFunction selects suitable values in accordance with the modality

Source code in pnpxai/explainers/lime.py
def get_tunables(self) -> Dict[str, Tuple[type, Dict]]:
    """
    Provides tunable parameters for the optimizer.

    Tunable parameters:
        `n_samples` (int): Value can be selected in the range of `range(10, 100, 10)`

        `baseline_fn` (callable): BaselineFunction selects suitable values in accordance with the modality

        `feature_mask_fn` (callable): FeatureMaskFunction selects suitable values in accordance with the modality
    """
    return {
        'n_samples': (int, {'low': 10, 'high': 100, 'step': 10}),
        'baseline_fn': (BaselineFunction, {}),
        'feature_mask_fn': (FeatureMaskFunction, {})
    }
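
The returned dictionary maps each hyperparameter name to a `(type, search_space)` pair that the library's optimizer consumes. The sketch below only inspects that dictionary and manually draws one `n_samples` candidate from the declared range; driving the actual optimizer is library-specific and not shown here.

```python
import random

tunables = explainer.get_tunables()
for name, (param_type, space) in tunables.items():
    print(name, param_type, space)
# e.g. n_samples <class 'int'> {'low': 10, 'high': 100, 'step': 10}

# Manually sample one n_samples candidate from the declared range.
space = tunables['n_samples'][1]
explainer.n_samples = random.randrange(space['low'], space['high'] + 1, space['step'])
```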