liuganghuggingface committed
Commit 6348fcc
1 Parent(s): 4132717

Update graph_decoder/diffusion_utils.py

Files changed (1)
  1. graph_decoder/diffusion_utils.py +388 -389

graph_decoder/diffusion_utils.py CHANGED
@@ -1,10 +1,11 @@
+ import os
+ import json
+ import yaml
+
  import torch
  import numpy as np
  from torch.nn import functional as F
  from torch_geometric.utils import to_dense_adj, to_dense_batch, remove_self_loops
- import os
- import json
- import yaml
  from types import SimpleNamespace

  def dict_to_namespace(d):
@@ -127,402 +128,400 @@ def encode_no_edge(E):
      return E


- #### diffusion utils
- class DistributionNodes:
-     def __init__(self, histogram):
-         """Compute the distribution of the number of nodes in the dataset, and sample from this distribution.
-         histogram: dict. The keys are num_nodes, the values are counts.
-         """
-
-         if type(histogram) == dict:
-             max_n_nodes = max(histogram.keys())
-             prob = torch.zeros(max_n_nodes + 1)
-             for num_nodes, count in histogram.items():
-                 prob[num_nodes] = count
-         else:
-             prob = histogram
-
-         self.prob = prob / prob.sum()
-         self.m = torch.distributions.Categorical(prob)
-
-     def sample_n(self, n_samples, device):
-         idx = self.m.sample((n_samples,))
-         return idx.to(device)
-
-     def log_prob(self, batch_n_nodes):
-         assert len(batch_n_nodes.size()) == 1
-         p = self.prob.to(batch_n_nodes.device)
-
-         probas = p[batch_n_nodes]
-         log_p = torch.log(probas + 1e-30)
-         return log_p
-
-
- class PredefinedNoiseScheduleDiscrete(torch.nn.Module):
-     def __init__(self, noise_schedule, timesteps):
-         super(PredefinedNoiseScheduleDiscrete, self).__init__()
-         self.timesteps = timesteps
-
-         betas = cosine_beta_schedule_discrete(timesteps)
-         self.register_buffer("betas", torch.from_numpy(betas).float())
-
-         # 0.9999
-         self.alphas = 1 - torch.clamp(self.betas, min=0, max=1)
-
-         log_alpha = torch.log(self.alphas)
-         log_alpha_bar = torch.cumsum(log_alpha, dim=0)
-         self.alphas_bar = torch.exp(log_alpha_bar)
-
-     def forward(self, t_normalized=None, t_int=None):
-         assert int(t_normalized is None) + int(t_int is None) == 1
-         if t_int is None:
-             t_int = torch.round(t_normalized * self.timesteps)
-         self.betas = self.betas.type_as(t_int)
-         return self.betas[t_int.long()]
-
-     def get_alpha_bar(self, t_normalized=None, t_int=None):
-         assert int(t_normalized is None) + int(t_int is None) == 1
-         if t_int is None:
-             t_int = torch.round(t_normalized * self.timesteps)
-         self.alphas_bar = self.alphas_bar.type_as(t_int)
-         return self.alphas_bar[t_int.long()]
-
-
- class DiscreteUniformTransition:
-     def __init__(self, x_classes: int, e_classes: int, y_classes: int):
-         self.X_classes = x_classes
-         self.E_classes = e_classes
-         self.y_classes = y_classes
-         self.u_x = torch.ones(1, self.X_classes, self.X_classes)
-         if self.X_classes > 0:
-             self.u_x = self.u_x / self.X_classes
-
-         self.u_e = torch.ones(1, self.E_classes, self.E_classes)
-         if self.E_classes > 0:
-             self.u_e = self.u_e / self.E_classes
-
-         self.u_y = torch.ones(1, self.y_classes, self.y_classes)
-         if self.y_classes > 0:
-             self.u_y = self.u_y / self.y_classes
-
-     def get_Qt(self, beta_t, device, X=None, flatten_e=None):
-         """Returns one-step transition matrices for X and E, from step t - 1 to step t.
-         Qt = (1 - beta_t) * I + beta_t / K
-
-         beta_t: (bs) noise level between 0 and 1
-         returns: qx (bs, dx, dx), qe (bs, de, de), qy (bs, dy, dy).
-         """
-         beta_t = beta_t.unsqueeze(1)
-         beta_t = beta_t.to(device)
-         self.u_x = self.u_x.to(device)
-         self.u_e = self.u_e.to(device)
-         self.u_y = self.u_y.to(device)
-
-         q_x = beta_t * self.u_x + (1 - beta_t) * torch.eye(
-             self.X_classes, device=device
-         ).unsqueeze(0)
-         q_e = beta_t * self.u_e + (1 - beta_t) * torch.eye(
-             self.E_classes, device=device
-         ).unsqueeze(0)
-         q_y = beta_t * self.u_y + (1 - beta_t) * torch.eye(
-             self.y_classes, device=device
-         ).unsqueeze(0)
-
-         return PlaceHolder(X=q_x, E=q_e, y=q_y)
-
-     def get_Qt_bar(self, alpha_bar_t, device, X=None, flatten_e=None):
-         """Returns t-step transition matrices for X and E, from step 0 to step t.
-         Qt = prod(1 - beta_t) * I + (1 - prod(1 - beta_t)) / K
-
-         alpha_bar_t: (bs) Product of the (1 - beta_t) for each time step from 0 to t.
-         returns: qx (bs, dx, dx), qe (bs, de, de), qy (bs, dy, dy).
-         """
-         alpha_bar_t = alpha_bar_t.unsqueeze(1)
-         alpha_bar_t = alpha_bar_t.to(device)
-         self.u_x = self.u_x.to(device)
-         self.u_e = self.u_e.to(device)
-         self.u_y = self.u_y.to(device)
-
-         q_x = (
-             alpha_bar_t * torch.eye(self.X_classes, device=device).unsqueeze(0)
-             + (1 - alpha_bar_t) * self.u_x
-         )
-         q_e = (
-             alpha_bar_t * torch.eye(self.E_classes, device=device).unsqueeze(0)
-             + (1 - alpha_bar_t) * self.u_e
-         )
-         q_y = (
-             alpha_bar_t * torch.eye(self.y_classes, device=device).unsqueeze(0)
-             + (1 - alpha_bar_t) * self.u_y
-         )
-
-         return PlaceHolder(X=q_x, E=q_e, y=q_y)
-
-
- class MarginalTransition:
-     def __init__(
-         self, x_marginals, e_marginals, xe_conditions, ex_conditions, y_classes, n_nodes
-     ):
-         self.X_classes = len(x_marginals)
-         self.E_classes = len(e_marginals)
-         self.y_classes = y_classes
-         self.x_marginals = x_marginals  # Dx
-         self.e_marginals = e_marginals  # Dx, De
-         self.xe_conditions = xe_conditions
-         # print('e_marginals.dtype', e_marginals.dtype)
-         # print('x_marginals.dtype', x_marginals.dtype)
-         # print('xe_conditions.dtype', xe_conditions.dtype)
-
-         self.u_x = (
-             x_marginals.unsqueeze(0).expand(self.X_classes, -1).unsqueeze(0)
-         )  # 1, Dx, Dx
-         self.u_e = (
-             e_marginals.unsqueeze(0).expand(self.E_classes, -1).unsqueeze(0)
-         )  # 1, De, De
-         self.u_xe = xe_conditions.unsqueeze(0)  # 1, Dx, De
-         self.u_ex = ex_conditions.unsqueeze(0)  # 1, De, Dx
-         self.u = self.get_union_transition(
-             self.u_x, self.u_e, self.u_xe, self.u_ex, n_nodes
-         )  # 1, Dx + n*De, Dx + n*De
-
-     def get_union_transition(self, u_x, u_e, u_xe, u_ex, n_nodes):
-         u_e = u_e.repeat(1, n_nodes, n_nodes)  # (1, n*de, n*de)
-         u_xe = u_xe.repeat(1, 1, n_nodes)  # (1, dx, n*de)
-         u_ex = u_ex.repeat(1, n_nodes, 1)  # (1, n*de, dx)
-         u0 = torch.cat([u_x, u_xe], dim=2)  # (1, dx, dx + n*de)
-         u1 = torch.cat([u_ex, u_e], dim=2)  # (1, n*de, dx + n*de)
-         u = torch.cat([u0, u1], dim=1)  # (1, dx + n*de, dx + n*de)
-         return u
-
-     def index_edge_margin(self, X, q_e, n_bond=5):
-         # q_e: (bs, dx, de) --> (bs, n, de)
-         bs, n, n_atom = X.shape
-         node_indices = X.argmax(-1)  # (bs, n)
-         ind = node_indices[:, :, None].expand(bs, n, n_bond)
-         q_e = torch.gather(q_e, 1, ind)
-         return q_e
-
-     def get_Qt(self, beta_t, device):
-         """Returns one-step transition matrices for X and E, from step t - 1 to step t.
-         Qt = (1 - beta_t) * I + beta_t / K
-         beta_t: (bs)
-         returns: q (bs, d0, d0)
-         """
-         bs = beta_t.size(0)
-         d0 = self.u.size(-1)
-         self.u = self.u.to(device)
-         u = self.u.expand(bs, d0, d0)
-
-         beta_t = beta_t.to(device)
-         beta_t = beta_t.view(bs, 1, 1)
-         q = beta_t * u + (1 - beta_t) * torch.eye(d0, device=device, dtype=self.u.dtype).unsqueeze(0)
-
-         return PlaceHolder(X=q, E=None, y=None)
-
-     def get_Qt_bar(self, alpha_bar_t, device):
-         """Returns t-step transition matrices for X and E, from step 0 to step t.
-         Qt = prod(1 - beta_t) * I + (1 - prod(1 - beta_t)) * K
-         alpha_bar_t: (bs, 1) Product of the (1 - beta_t) for each time step from 0 to t.
-         returns: q (bs, d0, d0)
-         """
-         bs = alpha_bar_t.size(0)
-         d0 = self.u.size(-1)
-         alpha_bar_t = alpha_bar_t.to(device)
-         alpha_bar_t = alpha_bar_t.view(bs, 1, 1)
-         self.u = self.u.to(device)
-         q = (
-             alpha_bar_t * torch.eye(d0, device=device, dtype=self.u.dtype).unsqueeze(0)
-             + (1 - alpha_bar_t) * self.u
-         )
-
-         return PlaceHolder(X=q, E=None, y=None)
-
-
- def sum_except_batch(x):
-     return x.reshape(x.size(0), -1).sum(dim=-1)
-
-
- def assert_correctly_masked(variable, node_mask):
-     assert (
-         variable * (1 - node_mask.long())
-     ).abs().max().item() < 1e-4, "Variables not masked properly."
-
-
- def cosine_beta_schedule_discrete(timesteps, s=0.008):
-     """Cosine schedule as proposed in https://openreview.net/forum?id=-NEXDKk8gZ."""
-     steps = timesteps + 2
-     x = np.linspace(0, steps, steps)
-
-     alphas_cumprod = np.cos(0.5 * np.pi * ((x / steps) + s) / (1 + s)) ** 2
-     alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
-     alphas = alphas_cumprod[1:] / alphas_cumprod[:-1]
-     betas = 1 - alphas
-     return betas.squeeze()
-
-
- def sample_discrete_features(probX, probE, node_mask, step=None, add_nose=True):
-     """Sample features from multinomial distribution with given probabilities (probX, probE, proby)
-     :param probX: bs, n, dx_out node features
-     :param probE: bs, n, n, de_out edge features
-     :param proby: bs, dy_out global features.
-     """
-     bs, n, _ = probX.shape
-
-     # Noise X
-     # The masked rows should define probability distributions as well
-     probX[~node_mask] = 1 / probX.shape[-1]
-
-     # Flatten the probability tensor to sample with multinomial
-     probX = probX.reshape(bs * n, -1)  # (bs * n, dx_out)
-
-     # Sample X
-     probX = probX.clamp_min(1e-5)
-     probX = probX / probX.sum(dim=-1, keepdim=True)
-     X_t = probX.multinomial(1)  # (bs * n, 1)
-     X_t = X_t.reshape(bs, n)  # (bs, n)
-
-     # Noise E
-     # The masked rows should define probability distributions as well
-     inverse_edge_mask = ~(node_mask.unsqueeze(1) * node_mask.unsqueeze(2))
-     diag_mask = torch.eye(n).unsqueeze(0).expand(bs, -1, -1)
-
-     probE[inverse_edge_mask] = 1 / probE.shape[-1]
-     probE[diag_mask.bool()] = 1 / probE.shape[-1]
-     probE = probE.reshape(bs * n * n, -1)  # (bs * n * n, de_out)
-     probE = probE.clamp_min(1e-5)
-     probE = probE / probE.sum(dim=-1, keepdim=True)
-
-     # Sample E
-     E_t = probE.multinomial(1).reshape(bs, n, n)  # (bs, n, n)
-     E_t = torch.triu(E_t, diagonal=1)
-     E_t = E_t + torch.transpose(E_t, 1, 2)
-
-     return PlaceHolder(X=X_t, E=E_t, y=torch.zeros(bs, 0).type_as(X_t))
-
-
- def mask_distributions(true_X, true_E, pred_X, pred_E, node_mask):
-     # Add a small value everywhere to avoid nans
-     pred_X = pred_X.clamp_min(1e-5)
-     pred_X = pred_X / torch.sum(pred_X, dim=-1, keepdim=True)
-
-     pred_E = pred_E.clamp_min(1e-5)
-     pred_E = pred_E / torch.sum(pred_E, dim=-1, keepdim=True)
-
-     # Set masked rows to arbitrary distributions, so they don't contribute to the loss
-     row_X = torch.ones(true_X.size(-1), dtype=true_X.dtype, device=true_X.device)
-     row_E = torch.zeros(
-         true_E.size(-1), dtype=true_E.dtype, device=true_E.device
-     ).clamp_min(1e-5)
-     row_E[0] = 1.0
-
-     diag_mask = ~torch.eye(
-         node_mask.size(1), device=node_mask.device, dtype=torch.bool
-     ).unsqueeze(0)
-     true_X[~node_mask] = row_X
-     true_E[~(node_mask.unsqueeze(1) * node_mask.unsqueeze(2) * diag_mask), :] = row_E
-     pred_X[~node_mask] = row_X.type_as(pred_X)
-     pred_E[~(node_mask.unsqueeze(1) * node_mask.unsqueeze(2) * diag_mask), :] = (
-         row_E.type_as(pred_E)
-     )
-
-     return true_X, true_E, pred_X, pred_E
-
-
- def forward_diffusion(X, X_t, Qt, Qsb, Qtb, X_dim):
-     bs, n, d = X.shape
-
-     Qt_X_T = torch.transpose(Qt.X, -2, -1)  # (bs, d, d)
-     left_term = X_t @ Qt_X_T  # (bs, N, d)
-     right_term = X @ Qsb.X  # (bs, N, d)
-
-     numerator = left_term * right_term  # (bs, N, d)
-     denominator = X @ Qtb.X  # (bs, N, d) @ (bs, d, d) = (bs, N, d)
-     denominator = denominator * X_t
-
-     num_X = numerator[:, :, :X_dim]
-     num_E = numerator[:, :, X_dim:].reshape(bs, n * n, -1)
-
-     deno_X = denominator[:, :, :X_dim]
-     deno_E = denominator[:, :, X_dim:].reshape(bs, n * n, -1)
-
-     denominator = denominator.unsqueeze(-1)  # (bs, N, 1)
-
-     deno_X = deno_X.sum(dim=-1, keepdim=True)
-     deno_E = deno_E.sum(dim=-1, keepdim=True)
-
-     deno_X[deno_X == 0.0] = 1
-     deno_E[deno_E == 0.0] = 1
-     prob_X = num_X / deno_X
-     prob_E = num_E / deno_E
-
-     prob_E = prob_E / prob_E.sum(dim=-1, keepdim=True)
-     prob_X = prob_X / prob_X.sum(dim=-1, keepdim=True)
-     return PlaceHolder(X=prob_X, E=prob_E, y=None)
-
-
- def reverse_diffusion(predX_0, X_t, Qt, Qsb, Qtb):
-     """M: X or E
-     Compute xt @ Qt.T * x0 @ Qsb / x0 @ Qtb @ xt.T for each possible value of x0
-     X_t: bs, n, dt or bs, n, n, dt
-     Qt: bs, d_t-1, dt
-     Qsb: bs, d0, d_t-1
-     Qtb: bs, d0, dt.
-     """
-     Qt_T = Qt.transpose(-1, -2)  # bs, N, dt
-     assert Qt.dim() == 3
-     left_term = X_t @ Qt_T  # bs, N, d_t-1
-     right_term = predX_0 @ Qsb
-     numerator = left_term * right_term  # bs, N, d_t-1
-
-     denominator = Qtb @ X_t.transpose(-1, -2)  # bs, d0, N
-     denominator = denominator.transpose(-1, -2)  # bs, N, d0
-     return numerator / denominator.clamp_min(1e-5)
-
- def reverse_tensor(x):
-     return x[torch.arange(x.size(0) - 1, -1, -1)]
-
- def sample_discrete_feature_noise(limit_dist, node_mask):
-     """Sample from the limit distribution of the diffusion process."""
-     bs, n_max = node_mask.shape
-     x_limit = limit_dist.X[None, None, :].expand(bs, n_max, -1)
-     x_limit = x_limit.to(node_mask.device)
-
-     U_X = x_limit.flatten(end_dim=-2).multinomial(1).reshape(bs, n_max)
-     U_X = F.one_hot(U_X.long(), num_classes=x_limit.shape[-1]).type_as(x_limit)
-
-     e_limit = limit_dist.E[None, None, None, :].expand(bs, n_max, n_max, -1)
-     U_E = e_limit.flatten(end_dim=-2).multinomial(1).reshape(bs, n_max, n_max)
-     U_E = F.one_hot(U_E.long(), num_classes=e_limit.shape[-1]).type_as(x_limit)
-
-     U_X = U_X.to(node_mask.device)
-     U_E = U_E.to(node_mask.device)
-
-     # Get upper triangular part of edge noise, without main diagonal
-     upper_triangular_mask = torch.zeros_like(U_E)
-     indices = torch.triu_indices(row=U_E.size(1), col=U_E.size(2), offset=1)
-     upper_triangular_mask[:, indices[0], indices[1], :] = 1
-
-     U_E = U_E * upper_triangular_mask
-     U_E = U_E + torch.transpose(U_E, 1, 2)
-
-     assert (U_E == torch.transpose(U_E, 1, 2)).all()
-     return PlaceHolder(X=U_X, E=U_E, y=None).mask(node_mask)
-
-
- def index_QE(X, q_e, n_bond=5):
-     bs, n, n_atom = X.shape
-     node_indices = X.argmax(-1)  # (bs, n)
-
-     exp_ind1 = node_indices[:, :, None, None, None].expand(
-         bs, n, n_atom, n_bond, n_bond
-     )
-     exp_ind2 = node_indices[:, :, None, None, None].expand(bs, n, n, n_bond, n_bond)
-
-     q_e = torch.gather(q_e, 1, exp_ind1)
-     q_e = torch.gather(q_e, 2, exp_ind2)  # (bs, n, n, n_bond, n_bond)
-
-     node_mask = X.sum(-1) != 0
-     no_edge = (~node_mask)[:, :, None] & (~node_mask)[:, None, :]
-     q_e[no_edge] = torch.tensor([1, 0, 0, 0, 0]).type_as(q_e)
-
-     return q_e
+ # #### diffusion utils
+ # class DistributionNodes:
+ #     def __init__(self, histogram):
+ #         """Compute the distribution of the number of nodes in the dataset, and sample from this distribution.
+ #         histogram: dict. The keys are num_nodes, the values are counts.
+ #         """
+
+ #         if type(histogram) == dict:
+ #             max_n_nodes = max(histogram.keys())
+ #             prob = torch.zeros(max_n_nodes + 1)
+ #             for num_nodes, count in histogram.items():
+ #                 prob[num_nodes] = count
+ #         else:
+ #             prob = histogram
+
+ #         self.prob = prob / prob.sum()
+ #         self.m = torch.distributions.Categorical(prob)
+
+ #     def sample_n(self, n_samples, device):
+ #         idx = self.m.sample((n_samples,))
+ #         return idx.to(device)
+
+ #     def log_prob(self, batch_n_nodes):
+ #         assert len(batch_n_nodes.size()) == 1
+ #         p = self.prob.to(batch_n_nodes.device)
+
+ #         probas = p[batch_n_nodes]
+ #         log_p = torch.log(probas + 1e-30)
+ #         return log_p
+
+
+ # class PredefinedNoiseScheduleDiscrete(torch.nn.Module):
+ #     def __init__(self, noise_schedule, timesteps):
+ #         super(PredefinedNoiseScheduleDiscrete, self).__init__()
+ #         self.timesteps = timesteps
+
+ #         betas = cosine_beta_schedule_discrete(timesteps)
+ #         self.register_buffer("betas", torch.from_numpy(betas).float())
+
+ #         # 0.9999
+ #         self.alphas = 1 - torch.clamp(self.betas, min=0, max=1)
+
+ #         log_alpha = torch.log(self.alphas)
+ #         log_alpha_bar = torch.cumsum(log_alpha, dim=0)
+ #         self.alphas_bar = torch.exp(log_alpha_bar)
+
+ #     def forward(self, t_normalized=None, t_int=None):
+ #         assert int(t_normalized is None) + int(t_int is None) == 1
+ #         if t_int is None:
+ #             t_int = torch.round(t_normalized * self.timesteps)
+ #         self.betas = self.betas.type_as(t_int)
+ #         return self.betas[t_int.long()]
+
+ #     def get_alpha_bar(self, t_normalized=None, t_int=None):
+ #         assert int(t_normalized is None) + int(t_int is None) == 1
+ #         if t_int is None:
+ #             t_int = torch.round(t_normalized * self.timesteps)
+ #         self.alphas_bar = self.alphas_bar.type_as(t_int)
+ #         return self.alphas_bar[t_int.long()]
+
+
+ # # class DiscreteUniformTransition:
+ # #     def __init__(self, x_classes: int, e_classes: int, y_classes: int):
+ # #         self.X_classes = x_classes
+ # #         self.E_classes = e_classes
+ # #         self.y_classes = y_classes
+ # #         self.u_x = torch.ones(1, self.X_classes, self.X_classes)
+ # #         if self.X_classes > 0:
+ # #             self.u_x = self.u_x / self.X_classes
+
+ # #         self.u_e = torch.ones(1, self.E_classes, self.E_classes)
+ # #         if self.E_classes > 0:
+ # #             self.u_e = self.u_e / self.E_classes
+
+ # #         self.u_y = torch.ones(1, self.y_classes, self.y_classes)
+ # #         if self.y_classes > 0:
+ # #             self.u_y = self.u_y / self.y_classes
+
+ # #     def get_Qt(self, beta_t, device, X=None, flatten_e=None):
+ # #         """Returns one-step transition matrices for X and E, from step t - 1 to step t.
+ # #         Qt = (1 - beta_t) * I + beta_t / K
+
+ # #         beta_t: (bs) noise level between 0 and 1
+ # #         returns: qx (bs, dx, dx), qe (bs, de, de), qy (bs, dy, dy).
+ # #         """
+ # #         beta_t = beta_t.unsqueeze(1)
+ # #         beta_t = beta_t.to(device)
+ # #         self.u_x = self.u_x.to(device)
+ # #         self.u_e = self.u_e.to(device)
+ # #         self.u_y = self.u_y.to(device)
+
+ # #         q_x = beta_t * self.u_x + (1 - beta_t) * torch.eye(
+ # #             self.X_classes, device=device
+ # #         ).unsqueeze(0)
+ # #         q_e = beta_t * self.u_e + (1 - beta_t) * torch.eye(
+ # #             self.E_classes, device=device
+ # #         ).unsqueeze(0)
+ # #         q_y = beta_t * self.u_y + (1 - beta_t) * torch.eye(
+ # #             self.y_classes, device=device
+ # #         ).unsqueeze(0)
+
+ # #         return PlaceHolder(X=q_x, E=q_e, y=q_y)
+
+ # #     def get_Qt_bar(self, alpha_bar_t, device, X=None, flatten_e=None):
+ # #         """Returns t-step transition matrices for X and E, from step 0 to step t.
+ # #         Qt = prod(1 - beta_t) * I + (1 - prod(1 - beta_t)) / K
+
+ # #         alpha_bar_t: (bs) Product of the (1 - beta_t) for each time step from 0 to t.
+ # #         returns: qx (bs, dx, dx), qe (bs, de, de), qy (bs, dy, dy).
+ # #         """
+ # #         alpha_bar_t = alpha_bar_t.unsqueeze(1)
+ # #         alpha_bar_t = alpha_bar_t.to(device)
+ # #         self.u_x = self.u_x.to(device)
+ # #         self.u_e = self.u_e.to(device)
+ # #         self.u_y = self.u_y.to(device)
+
+ # #         q_x = (
+ # #             alpha_bar_t * torch.eye(self.X_classes, device=device).unsqueeze(0)
+ # #             + (1 - alpha_bar_t) * self.u_x
+ # #         )
+ # #         q_e = (
+ # #             alpha_bar_t * torch.eye(self.E_classes, device=device).unsqueeze(0)
+ # #             + (1 - alpha_bar_t) * self.u_e
+ # #         )
+ # #         q_y = (
+ # #             alpha_bar_t * torch.eye(self.y_classes, device=device).unsqueeze(0)
+ # #             + (1 - alpha_bar_t) * self.u_y
+ # #         )
+
+ # #         return PlaceHolder(X=q_x, E=q_e, y=q_y)
+
+
+ # class MarginalTransition:
+ #     def __init__(
+ #         self, x_marginals, e_marginals, xe_conditions, ex_conditions, y_classes, n_nodes
+ #     ):
+ #         self.X_classes = len(x_marginals)
+ #         self.E_classes = len(e_marginals)
+ #         self.y_classes = y_classes
+ #         self.x_marginals = x_marginals  # Dx
+ #         self.e_marginals = e_marginals  # Dx, De
+ #         self.xe_conditions = xe_conditions
+ #         # print('e_marginals.dtype', e_marginals.dtype)
+ #         # print('x_marginals.dtype', x_marginals.dtype)
+ #         # print('xe_conditions.dtype', xe_conditions.dtype)
+
+ #         self.u_x = (
+ #             x_marginals.unsqueeze(0).expand(self.X_classes, -1).unsqueeze(0)
+ #         )  # 1, Dx, Dx
+ #         self.u_e = (
+ #             e_marginals.unsqueeze(0).expand(self.E_classes, -1).unsqueeze(0)
+ #         )  # 1, De, De
+ #         self.u_xe = xe_conditions.unsqueeze(0)  # 1, Dx, De
+ #         self.u_ex = ex_conditions.unsqueeze(0)  # 1, De, Dx
+ #         self.u = self.get_union_transition(
+ #             self.u_x, self.u_e, self.u_xe, self.u_ex, n_nodes
+ #         )  # 1, Dx + n*De, Dx + n*De
+
+ #     def get_union_transition(self, u_x, u_e, u_xe, u_ex, n_nodes):
+ #         u_e = u_e.repeat(1, n_nodes, n_nodes)  # (1, n*de, n*de)
+ #         u_xe = u_xe.repeat(1, 1, n_nodes)  # (1, dx, n*de)
+ #         u_ex = u_ex.repeat(1, n_nodes, 1)  # (1, n*de, dx)
+ #         u0 = torch.cat([u_x, u_xe], dim=2)  # (1, dx, dx + n*de)
+ #         u1 = torch.cat([u_ex, u_e], dim=2)  # (1, n*de, dx + n*de)
+ #         u = torch.cat([u0, u1], dim=1)  # (1, dx + n*de, dx + n*de)
+ #         return u
+
+ #     def index_edge_margin(self, X, q_e, n_bond=5):
+ #         # q_e: (bs, dx, de) --> (bs, n, de)
+ #         bs, n, n_atom = X.shape
+ #         node_indices = X.argmax(-1)  # (bs, n)
+ #         ind = node_indices[:, :, None].expand(bs, n, n_bond)
+ #         q_e = torch.gather(q_e, 1, ind)
+ #         return q_e
+
+ #     def get_Qt(self, beta_t, device):
+ #         """Returns one-step transition matrices for X and E, from step t - 1 to step t.
+ #         Qt = (1 - beta_t) * I + beta_t / K
+ #         beta_t: (bs)
+ #         returns: q (bs, d0, d0)
+ #         """
+ #         bs = beta_t.size(0)
+ #         d0 = self.u.size(-1)
+ #         self.u = self.u.to(device)
+ #         u = self.u.expand(bs, d0, d0)
+
+ #         beta_t = beta_t.to(device)
+ #         beta_t = beta_t.view(bs, 1, 1)
+ #         q = beta_t * u + (1 - beta_t) * torch.eye(d0, device=device, dtype=self.u.dtype).unsqueeze(0)
+
+ #         return PlaceHolder(X=q, E=None, y=None)
+
+ #     def get_Qt_bar(self, alpha_bar_t, device):
+ #         """Returns t-step transition matrices for X and E, from step 0 to step t.
+ #         Qt = prod(1 - beta_t) * I + (1 - prod(1 - beta_t)) * K
+ #         alpha_bar_t: (bs, 1) Product of the (1 - beta_t) for each time step from 0 to t.
+ #         returns: q (bs, d0, d0)
+ #         """
+ #         bs = alpha_bar_t.size(0)
+ #         d0 = self.u.size(-1)
+ #         alpha_bar_t = alpha_bar_t.to(device)
+ #         alpha_bar_t = alpha_bar_t.view(bs, 1, 1)
+ #         self.u = self.u.to(device)
+ #         q = (
+ #             alpha_bar_t * torch.eye(d0, device=device, dtype=self.u.dtype).unsqueeze(0)
+ #             + (1 - alpha_bar_t) * self.u
+ #         )
+
+ #         return PlaceHolder(X=q, E=None, y=None)
+
+
+ # def sum_except_batch(x):
+ #     return x.reshape(x.size(0), -1).sum(dim=-1)
+
+ # def assert_correctly_masked(variable, node_mask):
+ #     assert (
+ #         variable * (1 - node_mask.long())
+ #     ).abs().max().item() < 1e-4, "Variables not masked properly."
+
+ # def cosine_beta_schedule_discrete(timesteps, s=0.008):
+ #     """Cosine schedule as proposed in https://openreview.net/forum?id=-NEXDKk8gZ."""
+ #     steps = timesteps + 2
+ #     x = np.linspace(0, steps, steps)
+
+ #     alphas_cumprod = np.cos(0.5 * np.pi * ((x / steps) + s) / (1 + s)) ** 2
+ #     alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
+ #     alphas = alphas_cumprod[1:] / alphas_cumprod[:-1]
+ #     betas = 1 - alphas
+ #     return betas.squeeze()
+
+
+ # def sample_discrete_features(probX, probE, node_mask, step=None, add_nose=True):
+ #     """Sample features from multinomial distribution with given probabilities (probX, probE, proby)
+ #     :param probX: bs, n, dx_out node features
+ #     :param probE: bs, n, n, de_out edge features
+ #     :param proby: bs, dy_out global features.
+ #     """
+ #     bs, n, _ = probX.shape
+
+ #     # Noise X
+ #     # The masked rows should define probability distributions as well
+ #     probX[~node_mask] = 1 / probX.shape[-1]
+
+ #     # Flatten the probability tensor to sample with multinomial
+ #     probX = probX.reshape(bs * n, -1)  # (bs * n, dx_out)
+
+ #     # Sample X
+ #     probX = probX.clamp_min(1e-5)
+ #     probX = probX / probX.sum(dim=-1, keepdim=True)
+ #     X_t = probX.multinomial(1)  # (bs * n, 1)
+ #     X_t = X_t.reshape(bs, n)  # (bs, n)
+
+ #     # Noise E
+ #     # The masked rows should define probability distributions as well
+ #     inverse_edge_mask = ~(node_mask.unsqueeze(1) * node_mask.unsqueeze(2))
+ #     diag_mask = torch.eye(n).unsqueeze(0).expand(bs, -1, -1)
+
+ #     probE[inverse_edge_mask] = 1 / probE.shape[-1]
+ #     probE[diag_mask.bool()] = 1 / probE.shape[-1]
+ #     probE = probE.reshape(bs * n * n, -1)  # (bs * n * n, de_out)
+ #     probE = probE.clamp_min(1e-5)
+ #     probE = probE / probE.sum(dim=-1, keepdim=True)
+
+ #     # Sample E
+ #     E_t = probE.multinomial(1).reshape(bs, n, n)  # (bs, n, n)
+ #     E_t = torch.triu(E_t, diagonal=1)
+ #     E_t = E_t + torch.transpose(E_t, 1, 2)
+
+ #     return PlaceHolder(X=X_t, E=E_t, y=torch.zeros(bs, 0).type_as(X_t))
+
+
+ # def mask_distributions(true_X, true_E, pred_X, pred_E, node_mask):
+ #     # Add a small value everywhere to avoid nans
+ #     pred_X = pred_X.clamp_min(1e-5)
+ #     pred_X = pred_X / torch.sum(pred_X, dim=-1, keepdim=True)
+
+ #     pred_E = pred_E.clamp_min(1e-5)
+ #     pred_E = pred_E / torch.sum(pred_E, dim=-1, keepdim=True)
+
+ #     # Set masked rows to arbitrary distributions, so they don't contribute to the loss
+ #     row_X = torch.ones(true_X.size(-1), dtype=true_X.dtype, device=true_X.device)
+ #     row_E = torch.zeros(
+ #         true_E.size(-1), dtype=true_E.dtype, device=true_E.device
+ #     ).clamp_min(1e-5)
+ #     row_E[0] = 1.0
+
+ #     diag_mask = ~torch.eye(
+ #         node_mask.size(1), device=node_mask.device, dtype=torch.bool
+ #     ).unsqueeze(0)
+ #     true_X[~node_mask] = row_X
+ #     true_E[~(node_mask.unsqueeze(1) * node_mask.unsqueeze(2) * diag_mask), :] = row_E
+ #     pred_X[~node_mask] = row_X.type_as(pred_X)
+ #     pred_E[~(node_mask.unsqueeze(1) * node_mask.unsqueeze(2) * diag_mask), :] = (
+ #         row_E.type_as(pred_E)
+ #     )
+
+ #     return true_X, true_E, pred_X, pred_E
+
+
+ # def forward_diffusion(X, X_t, Qt, Qsb, Qtb, X_dim):
+ #     bs, n, d = X.shape
+
+ #     Qt_X_T = torch.transpose(Qt.X, -2, -1)  # (bs, d, d)
+ #     left_term = X_t @ Qt_X_T  # (bs, N, d)
+ #     right_term = X @ Qsb.X  # (bs, N, d)
+
+ #     numerator = left_term * right_term  # (bs, N, d)
+ #     denominator = X @ Qtb.X  # (bs, N, d) @ (bs, d, d) = (bs, N, d)
+ #     denominator = denominator * X_t
+
+ #     num_X = numerator[:, :, :X_dim]
+ #     num_E = numerator[:, :, X_dim:].reshape(bs, n * n, -1)
+
+ #     deno_X = denominator[:, :, :X_dim]
+ #     deno_E = denominator[:, :, X_dim:].reshape(bs, n * n, -1)
+
+ #     denominator = denominator.unsqueeze(-1)  # (bs, N, 1)
+
+ #     deno_X = deno_X.sum(dim=-1, keepdim=True)
+ #     deno_E = deno_E.sum(dim=-1, keepdim=True)
+
+ #     deno_X[deno_X == 0.0] = 1
+ #     deno_E[deno_E == 0.0] = 1
+ #     prob_X = num_X / deno_X
+ #     prob_E = num_E / deno_E
+
+ #     prob_E = prob_E / prob_E.sum(dim=-1, keepdim=True)
+ #     prob_X = prob_X / prob_X.sum(dim=-1, keepdim=True)
+ #     return PlaceHolder(X=prob_X, E=prob_E, y=None)
+
+
+ # def reverse_diffusion(predX_0, X_t, Qt, Qsb, Qtb):
+ #     """M: X or E
+ #     Compute xt @ Qt.T * x0 @ Qsb / x0 @ Qtb @ xt.T for each possible value of x0
+ #     X_t: bs, n, dt or bs, n, n, dt
+ #     Qt: bs, d_t-1, dt
+ #     Qsb: bs, d0, d_t-1
+ #     Qtb: bs, d0, dt.
+ #     """
+ #     Qt_T = Qt.transpose(-1, -2)  # bs, N, dt
+ #     assert Qt.dim() == 3
+ #     left_term = X_t @ Qt_T  # bs, N, d_t-1
+ #     right_term = predX_0 @ Qsb
+ #     numerator = left_term * right_term  # bs, N, d_t-1
+
+ #     denominator = Qtb @ X_t.transpose(-1, -2)  # bs, d0, N
+ #     denominator = denominator.transpose(-1, -2)  # bs, N, d0
+ #     return numerator / denominator.clamp_min(1e-5)
+
+ # def reverse_tensor(x):
+ #     return x[torch.arange(x.size(0) - 1, -1, -1)]
+
+ # def sample_discrete_feature_noise(limit_dist, node_mask):
+ #     """Sample from the limit distribution of the diffusion process."""
+ #     bs, n_max = node_mask.shape
+ #     x_limit = limit_dist.X[None, None, :].expand(bs, n_max, -1)
+ #     x_limit = x_limit.to(node_mask.device)
+
+ #     U_X = x_limit.flatten(end_dim=-2).multinomial(1).reshape(bs, n_max)
+ #     U_X = F.one_hot(U_X.long(), num_classes=x_limit.shape[-1]).type_as(x_limit)
+
+ #     e_limit = limit_dist.E[None, None, None, :].expand(bs, n_max, n_max, -1)
+ #     U_E = e_limit.flatten(end_dim=-2).multinomial(1).reshape(bs, n_max, n_max)
+ #     U_E = F.one_hot(U_E.long(), num_classes=e_limit.shape[-1]).type_as(x_limit)
+
+ #     U_X = U_X.to(node_mask.device)
+ #     U_E = U_E.to(node_mask.device)
+
+ #     # Get upper triangular part of edge noise, without main diagonal
+ #     upper_triangular_mask = torch.zeros_like(U_E)
+ #     indices = torch.triu_indices(row=U_E.size(1), col=U_E.size(2), offset=1)
+ #     upper_triangular_mask[:, indices[0], indices[1], :] = 1
+
+ #     U_E = U_E * upper_triangular_mask
+ #     U_E = U_E + torch.transpose(U_E, 1, 2)
+
+ #     assert (U_E == torch.transpose(U_E, 1, 2)).all()
+ #     return PlaceHolder(X=U_X, E=U_E, y=None).mask(node_mask)
+
+
+ # def index_QE(X, q_e, n_bond=5):
+ #     bs, n, n_atom = X.shape
+ #     node_indices = X.argmax(-1)  # (bs, n)
+
+ #     exp_ind1 = node_indices[:, :, None, None, None].expand(
+ #         bs, n, n_atom, n_bond, n_bond
+ #     )
+ #     exp_ind2 = node_indices[:, :, None, None, None].expand(bs, n, n, n_bond, n_bond)
+
+ #     q_e = torch.gather(q_e, 1, exp_ind1)
+ #     q_e = torch.gather(q_e, 2, exp_ind2)  # (bs, n, n, n_bond, n_bond)
+
+ #     node_mask = X.sum(-1) != 0
+ #     no_edge = (~node_mask)[:, :, None] & (~node_mask)[:, None, :]
+ #     q_e[no_edge] = torch.tensor([1, 0, 0, 0, 0]).type_as(q_e)
+
+ #     return q_e
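For readers skimming the diff: the transition math being commented out here is self-contained and easy to exercise in isolation. Below is a minimal standalone sketch (NumPy only; the 500-step horizon, K = 5 classes, and step t = 100 are arbitrary illustration values, not taken from this repo) of the cosine beta schedule and the uniform transitions Qt = (1 - beta_t) * I + beta_t / K and Qt_bar = alpha_bar_t * I + (1 - alpha_bar_t) / K from the docstrings above, with a check that each row remains a probability distribution:

import numpy as np

def cosine_beta_schedule_discrete(timesteps, s=0.008):
    # Same cosine schedule as in the diff above.
    steps = timesteps + 2
    x = np.linspace(0, steps, steps)
    alphas_cumprod = np.cos(0.5 * np.pi * ((x / steps) + s) / (1 + s)) ** 2
    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
    alphas = alphas_cumprod[1:] / alphas_cumprod[:-1]
    return (1 - alphas).squeeze()

betas = cosine_beta_schedule_discrete(timesteps=500)  # hypothetical horizon
K = 5       # hypothetical number of classes (e.g. bond types)
t = 100     # arbitrary step
beta_t = betas[t]

# One-step uniform transition, Qt = (1 - beta_t) * I + beta_t / K:
Qt = (1 - beta_t) * np.eye(K) + (beta_t / K) * np.ones((K, K))
assert np.allclose(Qt.sum(axis=1), 1.0)  # each row is a probability distribution

# t-step transition via the cumulative product alpha_bar_t = prod(1 - beta_s):
alpha_bar_t = np.prod(1 - betas[: t + 1])
Qt_bar = alpha_bar_t * np.eye(K) + ((1 - alpha_bar_t) / K) * np.ones((K, K))
assert np.allclose(Qt_bar.sum(axis=1), 1.0)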