chrisc36 committed
Commit 8b8f7c1
Parent: 87e7a2f

Delete torch_util.py

Files changed (1)
  1. torch_util.py +0 -183
torch_util.py DELETED
@@ -1,183 +0,0 @@
- import gc
- import os
- import logging
- from typing import Optional, TypeVar, List, Tuple
-
- import torch
- import torch.distributed as dist
-
- T = TypeVar("T")
-
-
- log = logging.getLogger(__name__)
-
-
- def seed_all(seed: int):
-     """Seed all rng objects."""
-     import random
-
-     import numpy as np
-
-     if seed < 0 or seed > 2**32 - 1:
-         raise ValueError(f"Seed {seed} is invalid. It must be on [0; 2^32 - 1]")
-     random.seed(seed)
-     np.random.seed(seed)
-     torch.manual_seed(seed)
-     # torch.manual_seed may already call manual_seed_all, but call it again here
-     # to make sure it gets called at least once
-     torch.cuda.manual_seed_all(seed)
-
-
- def is_distributed() -> bool:
-     return dist.is_available() and dist.is_initialized()
-
-
- def get_node_rank() -> int:
-     return int(os.environ.get("NODE_RANK") or (get_global_rank() - get_local_rank()) // get_local_world_size())
-
-
- def get_world_size() -> int:
-     if is_distributed():
-         return dist.get_world_size()
-     else:
-         return 1
-
-
- def get_local_world_size() -> int:
-     return int(os.environ.get("LOCAL_WORLD_SIZE") or 1)
-
-
- def get_global_rank() -> int:
-     if is_distributed():
-         return int(os.environ.get("RANK") or dist.get_rank())
-     else:
-         return 0
-
-
- def get_local_rank() -> int:
-     return int(os.environ.get("LOCAL_RANK") or 0)
-
-
- def get_fs_local_rank() -> int:
-     """Get the local rank per filesystem, meaning that, regardless of the number of nodes,
-     if all ranks share the same filesystem then `get_fs_local_rank()` will be equivalent to `get_global_rank()`,
-     but if nodes do not share the same filesystem then `get_fs_local_rank()` will be equivalent to `get_local_rank()`.
-     """
-     if os.environ.get("OLMO_SHARED_FS"):
-         return int(os.environ.get("FS_LOCAL_RANK") or get_global_rank())
-     else:
-         return int(os.environ.get("FS_LOCAL_RANK") or get_local_rank())
-
-
- def move_to_device(o: T, device: torch.device) -> T:
-     if isinstance(o, torch.Tensor):
-         return o.to(device)  # type: ignore[return-value]
-     elif isinstance(o, dict):
-         return {k: move_to_device(v, device) for k, v in o.items()}  # type: ignore[return-value]
-     elif isinstance(o, list):
-         return [move_to_device(x, device) for x in o]  # type: ignore[return-value]
-     elif isinstance(o, tuple):
-         return tuple((move_to_device(x, device) for x in o))  # type: ignore[return-value]
-     else:
-         return o
-
-
- def ensure_finite_(x: torch.Tensor, check_neg_inf: bool = True, check_pos_inf: bool = False):
-     """
-     Modify ``x`` in place to replace ``float("-inf")`` with the minimum value of the dtype when ``check_neg_inf``
-     is ``True`` and to replace ``float("inf")`` with the maximum value of the dtype when ``check_pos_inf`` is ``True``.
-     """
-     if check_neg_inf:
-         x.masked_fill_(x == float("-inf"), torch.finfo(x.dtype).min)
-     if check_pos_inf:
-         x.masked_fill_(x == float("inf"), torch.finfo(x.dtype).max)
-
-
- def get_default_device() -> torch.device:
-     if torch.cuda.is_available() and torch.cuda.is_initialized():
-         return torch.device("cuda")
-     else:
-         return torch.device("cpu")
-
-
- def barrier() -> None:
-     if is_distributed():
-         dist.barrier()
-
-
- def peak_gpu_memory(reset: bool = False) -> Optional[float]:
-     """
-     Get the peak GPU memory usage in MB across all ranks.
-     Only rank 0 will get the final result.
-     """
-     if not torch.cuda.is_available():
-         return None
-
-     device = torch.device("cuda")
-     peak_mb = torch.cuda.max_memory_allocated(device) / 1000000
-     if is_distributed():
-         peak_mb_tensor = torch.tensor(peak_mb, device=device)
-         dist.reduce(peak_mb_tensor, 0, dist.ReduceOp.MAX)
-         peak_mb = peak_mb_tensor.item()
-
-     if reset:
-         # Reset peak stats.
-         torch.cuda.reset_max_memory_allocated(device)
-
-     return peak_mb
-
-
- V = TypeVar("V", bool, int, float)
-
-
- def synchronize_value(value: V, device: torch.device) -> V:
-     if dist.is_available() and dist.is_initialized():
-         value_tensor = torch.tensor(value, device=device)
-         dist.broadcast(value_tensor, 0)
-         return value_tensor.item()  # type: ignore
-     else:
-         return value
-
-
- def synchronize_flag(flag: bool, device: torch.device) -> bool:
-     return synchronize_value(flag, device)
-
-
- def gc_cuda():
-     gc.collect()
-     if torch.cuda.is_available():
-         torch.cuda.empty_cache()
-
-
- def listinstr(lst, s, delimiter=None):
-     assert isinstance(lst, list)
-     for item in lst:
-         if delimiter:
-             if all(x in s for x in item.split(delimiter)):
-                 return True
-         else:
-             if item in s:
-                 return True
-     return False
-
-
- def freeze_module(module: torch.nn.Module, exclude_params: Optional[List[str]] = None):
-     for name, param in module.named_parameters():
-         if exclude_params is not None and listinstr(exclude_params, name):
-             continue
-         param.requires_grad = False
-
-
- def freeze_parameters_by_name(model: torch.nn.Module, freeze_names: Tuple[str, ...]):
-     for name in freeze_names:
-         try:
-             module_or_param = model.get_submodule(name)
-         except AttributeError:
-             try:
-                 module_or_param = model.get_parameter(name)
-             except AttributeError:
-                 log.warning(f"Could not find module or parameter with name {name}")
-                 continue
-         if isinstance(module_or_param, torch.nn.Module):
-             freeze_module(module_or_param)
-         else:
-             module_or_param.requires_grad = False
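
For reference, a minimal usage sketch of the deleted helpers as they existed before this commit; the torch.nn.Linear stand-in model and the frozen parameter name below are illustrative placeholders, not code from this repo:

# Sketch only: assumes torch_util.py is still importable (i.e. prior to this commit).
import torch

from torch_util import (
    seed_all,
    get_default_device,
    move_to_device,
    freeze_parameters_by_name,
    barrier,
    peak_gpu_memory,
)

seed_all(42)                   # seeds Python's random, numpy, and torch RNGs
device = get_default_device()  # "cuda" only if CUDA is available and initialized

model = torch.nn.Linear(16, 4).to(device)    # placeholder for a real model
freeze_parameters_by_name(model, ("bias",))  # freeze a parameter by name

batch = {"x": torch.randn(8, 16)}
batch = move_to_device(batch, device)  # recurses through dicts/lists/tuples

out = model(batch["x"])

barrier()                 # no-op unless torch.distributed is initialized
print(peak_gpu_memory())  # None on machines without CUDA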