velora.utils

Generic utility methods usable in any experiment.

set_device(device='auto')

Sets the PyTorch device dynamically.

Parameters:

  device (str, optional): the name of the device to perform computations on.

      When auto:

        • Set to cuda:0, if available.
        • Else, cpu.

      Default: 'auto'

Returns:

  device (torch.device): the PyTorch device.

Source code in velora/utils/core.py
Python
def set_device(device: str = "auto") -> torch.device:
    """
    Sets the `PyTorch` device dynamically.

    Parameters:
        device (str, optional): the name of the device to perform computations on.

            When `auto`:

            - Set to `cuda:0`, if available.
            - Else, `cpu`.

    Returns:
        device (torch.device): the `PyTorch` device.
    """
    if device == "auto":
        device = "cuda:0" if torch.cuda.is_available() else "cpu"

    return torch.device(device)

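Example: a minimal usage sketch (assuming set_device is importable from velora.utils, as the page title suggests):

Python
import torch

from velora.utils import set_device  # import path assumed

device = set_device()    # cuda:0 if a GPU is available, otherwise cpu
cpu = set_device("cpu")  # any valid torch device string also works

x = torch.zeros(3, device=device)
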
set_seed(value=None)

Sets the random seed for Python, PyTorch and NumPy. When None, a new seed is generated automatically.

Parameters:

  value (int, optional): the seed value. Default: None

Returns:

  seed (int): the seed value that was used.

Source code in velora/utils/core.py
Python
def set_seed(value: int | None = None) -> int:
    """
    Sets the random seed for `Python`, `PyTorch` and `NumPy`.
    When `None`, a new seed is generated automatically.

    Parameters:
        value (int, optional): the seed value

    Returns:
        seed (int): the seed value that was used
    """
    if value is None:
        value = random.randint(0, 2**32 - 1)

    random.seed(value)
    torch.manual_seed(value)
    np.random.seed(value)

    return value

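Example: a minimal usage sketch (import path assumed):

Python
from velora.utils import set_seed  # import path assumed

seed = set_seed(42)  # fixes the Python, PyTorch and NumPy RNGs to 42
auto = set_seed()    # generates, applies and returns a new seed value
print(seed, auto)
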
active_parameters(model)

Calculates the number of active (non-zero) parameters in a PyTorch nn.Module. Parameters that are 0 are filtered out.

Parameters:

  model (nn.Module, required): a PyTorch module with parameters.

Returns:

  count (int): the total number of active parameters.

Source code in velora/utils/torch.py
Python
@torch.jit.ignore
def active_parameters(model: nn.Module) -> int:
    """
    Calculates the number of active (non-zero) parameters in a PyTorch `nn.Module`.
    Parameters that are `0` are filtered out.

    Parameters:
        model (nn.Module): a PyTorch module with parameters

    Returns:
        count (int): the total active number of parameters.
    """
    return sum((p != 0).sum().item() for p in model.parameters() if p.requires_grad)

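Example: a sketch contrasting it with total_parameters (import paths assumed):

Python
import torch
import torch.nn as nn

from velora.utils import active_parameters, total_parameters  # import paths assumed

model = nn.Linear(4, 2)  # 4*2 weights + 2 biases = 10 parameters
with torch.no_grad():
    model.weight[0].zero_()  # zero out one row of 4 weights

print(total_parameters(model))   # 10
print(active_parameters(model))  # 6, assuming no other value was initialised to exactly 0
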
hard_update(source, target)

Performs a hard parameter update between two PyTorch Networks.

Parameters:

  source (nn.Module, required): the source network.
  target (nn.Module, required): the target network.

Source code in velora/utils/torch.py
Python
def hard_update(source: nn.Module, target: nn.Module) -> None:
    """
    Performs a hard parameter update between two PyTorch Networks.

    Parameters:
        source (nn.Module): the source network
        target (nn.Module): the target network
    """
    for target_param, param in zip(target.parameters(), source.parameters()):
        target_param.data.copy_(param.data)

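Example: a minimal sketch copying online-network weights into a target network (import path assumed):

Python
import torch.nn as nn

from velora.utils import hard_update  # import path assumed

online = nn.Linear(4, 2)
target = nn.Linear(4, 2)

hard_update(online, target)  # target now holds an exact copy of online's parameters
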
soft_update(source, target, *, tau=0.005)

Performs a soft parameter update between two PyTorch Networks.

Parameters:

  source (nn.Module, required): the source network.
  target (nn.Module, required): the target network.
  tau (float, optional): the soft update factor used to slowly update the target network. Default: 0.005

Source code in velora/utils/torch.py
Python
def soft_update(source: nn.Module, target: nn.Module, *, tau: float = 0.005) -> None:
    """
    Performs a soft parameter update between two PyTorch Networks.

    Parameters:
        source (nn.Module): the source network
        target (nn.Module): the target network
        tau (float, optional): the soft update factor used to slowly update
            the target network
    """
    for target_param, param in zip(target.parameters(), source.parameters()):
        target_param.data.copy_(tau * param.data + (1.0 - tau) * target_param.data)

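Example: a minimal sketch of a Polyak-averaged target update (import path assumed):

Python
import torch.nn as nn

from velora.utils import soft_update  # import path assumed

online = nn.Linear(4, 2)
target = nn.Linear(4, 2)

# target <- tau * online + (1 - tau) * target; typically called once per training step
soft_update(online, target, tau=0.005)
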
stack_tensor(items, *, dtype=torch.float32, device=None)

Stacks a list of tensors together, then:

  1. Converts it to a specific dtype
  2. Loads it onto device

Parameters:

  items (List[torch.Tensor], required): a list of tensors to stack.
  dtype (torch.dtype, optional): the data type for the tensor. Default: torch.float32
  device (torch.device, optional): the device to perform computations on. Default: None

Returns:

  tensor (torch.Tensor): the stacked torch.Tensor.

Source code in velora/utils/torch.py
Python
def stack_tensor(
    items: List[torch.Tensor],
    *,
    dtype: torch.dtype = torch.float32,
    device: torch.device | None = None,
) -> torch.Tensor:
    """
    Stacks a list of tensors together, then:

    1. Converts it to a specific `dtype`
    2. Loads it onto `device`

    Parameters:
        items (List[torch.Tensor]): a list of tensors to stack
        dtype (torch.dtype, optional): the data type for the tensor
        device (torch.device, optional): the device to perform computations on

    Returns:
        tensor (torch.Tensor): the stacked `torch.Tensor`.
    """
    return torch.stack(items).to(dtype=dtype, device=device)

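Example: a minimal sketch batching same-shaped tensors (import path assumed):

Python
import torch

from velora.utils import stack_tensor  # import path assumed

frames = [torch.rand(3) for _ in range(5)]  # five tensors of shape (3,)
batch = stack_tensor(frames)                # stacked along a new first dimension

print(batch.shape, batch.dtype)  # torch.Size([5, 3]) torch.float32
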
summary(module)

Outputs a summary of a module and all its sub-modules as a dictionary.

Parameters:

  module (nn.Module, required): a PyTorch module.

Returns:

  summary (Dict[str, str]): key-value pairs for the network layout.

Source code in velora/utils/torch.py
Python
@torch.jit.ignore
def summary(module: nn.Module) -> Dict[str, str]:
    """
    Outputs a summary of a module and all its sub-modules as a dictionary.

    Returns:
        summary (Dict[str, str]): key-value pairs for the network layout.
    """
    model_dict = {}

    for name, mod in module.named_children():
        if len(list(mod.children())) > 0:
            # If the module has submodules, recurse
            model_dict[name] = summary(mod)
        else:
            # If it's a leaf module, store its string representation
            model_dict[name] = str(mod)

    return model_dict

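Example: a minimal sketch on a small sequential network (import path assumed):

Python
import torch.nn as nn

from velora.utils import summary  # import path assumed

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
print(summary(net))
# {'0': 'Linear(in_features=4, out_features=8, bias=True)', '1': 'ReLU()',
#  '2': 'Linear(in_features=8, out_features=2, bias=True)'}
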
to_tensor(items, *, dtype=torch.float32, device=None)

Converts a list of items to a Tensor, then:

  1. Converts it to a specific dtype
  2. Loads it onto device

Parameters:

  items (List[Any], required): a list of items of any type.
  dtype (torch.dtype, optional): the data type for the tensor. Default: torch.float32
  device (torch.device, optional): the device to perform computations on. Default: None

Returns:

  tensor (torch.Tensor): the converted torch.Tensor.

Source code in velora/utils/torch.py
Python
def to_tensor(
    items: List[Any],
    *,
    dtype: torch.dtype = torch.float32,
    device: torch.device | None = None,
) -> torch.Tensor:
    """
    Converts a list of items to a Tensor, then:

    1. Converts it to a specific `dtype`
    2. Loads it onto `device`

    Parameters:
        items (List[Any]): a list of items of any type
        dtype (torch.dtype, optional): the data type for the tensor
        device (torch.device, optional): the device to perform computations on

    Returns:
        tensor (torch.Tensor): the converted `torch.Tensor`.
    """
    return torch.tensor(items).to(dtype=dtype, device=device)

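Example: a minimal sketch converting plain Python lists (import path assumed):

Python
import torch

from velora.utils import to_tensor  # import path assumed

rewards = to_tensor([1.0, 0.5, -0.2])                # defaults to torch.float32
actions = to_tensor([0, 1, 1, 0], dtype=torch.long)

print(rewards.dtype, actions.dtype)  # torch.float32 torch.int64
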
total_parameters(model)

Calculates the total number of trainable parameters in a PyTorch nn.Module.

Parameters:

  model (nn.Module, required): a PyTorch module with parameters.

Returns:

  count (int): the total number of trainable parameters.

Source code in velora/utils/torch.py
Python
@torch.jit.ignore
def total_parameters(model: nn.Module) -> int:
    """
    Calculates the total number of parameters used in a PyTorch `nn.Module`.

    Parameters:
        model (nn.Module): a PyTorch module with parameters

    Returns:
        count (int): the total number of parameters.
    """
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
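
Example: a minimal sketch, including the effect of frozen parameters (import path assumed):

Python
import torch.nn as nn

from velora.utils import total_parameters  # import path assumed

model = nn.Linear(4, 2)
print(total_parameters(model))  # 10: 4*2 weights + 2 biases

model.bias.requires_grad_(False)
print(total_parameters(model))  # 8: frozen parameters are excluded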