f-galkin committed on
Commit 7bd2a58
1 Parent(s): 5f87cb2

Upload P3GPT handler and TCM database modules

demo/P3LIB/endpoints.py ADDED
@@ -0,0 +1,565 @@
+ from typing import Dict, List, Any
+ import os
+ import torch
+ from transformers import AutoTokenizer, AutoModel
+ import pandas as pd
+ import time
+ import numpy as np
+ from transformers import GenerationConfig
+ from P3LIB.precious3_gpt_multi_modal import Custom_MPTForCausalLM
+
+
+ class EndpointHandler:
+     def __init__(self, path="insilicomedicine/precious3-gpt", device='cuda:1'):
+         self.device = device
+         self.model = AutoModel.from_pretrained(path, trust_remote_code=True).to(self.device)
+         self.tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
+         self.model.config.pad_token_id = self.tokenizer.pad_token_id
+         self.model.config.bos_token_id = self.tokenizer.bos_token_id
+         self.model.config.eos_token_id = self.tokenizer.eos_token_id
+
+         unique_entities_p3 = pd.read_csv(
+             'https://huggingface.co/insilicomedicine/precious3-gpt/raw/main/all_entities_with_type.csv')
+         self.unique_compounds_p3 = [i.strip() for i in
+                                     unique_entities_p3[unique_entities_p3.type == 'compound'].entity.to_list()]
+         self.unique_genes_p3 = [i.strip() for i in
+                                 unique_entities_p3[unique_entities_p3.type == 'gene'].entity.to_list()]
+
+     def create_prompt(self, prompt_config):
+         prompt = "[BOS]"
+         multi_modal_prefix = ''
+
+         for k, v in prompt_config.items():
+             if k == 'instruction':
+                 prompt += f'<{v}>' if isinstance(v, str) else "".join([f'<{v_i}>' for v_i in v])
+             elif k in ('up', 'down'):
+                 if v:
+                     prompt += f'{multi_modal_prefix}<{k}>{v} </{k}>' if isinstance(v, str) \
+                         else f'{multi_modal_prefix}<{k}>{" ".join(v)} </{k}>'
+             elif k == 'age':
+                 if isinstance(v, int):
+                     if prompt_config['species'].strip() == 'human':
+                         prompt += f'<{k}_individ>{v} </{k}_individ>'
+                     elif prompt_config['species'].strip() == 'macaque':
+                         prompt += f'<{k}_individ>Macaca-{int(v / 20)} </{k}_individ>'
+             else:
+                 if v:
+                     prompt += f'<{k}>{v.strip()} </{k}>' if isinstance(v, str) else f'<{k}>{" ".join(v)} </{k}>'
+                 else:
+                     prompt += f'<{k}></{k}>'
+         return prompt
+
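For orientation, here is what create_prompt builds from a hypothetical prompt_config (the field names below are illustrative, not the model's full schema):

config = {
    "instruction": "disease2diff",
    "tissue": "lung",
    "age": 65,
    "species": "human",
    "up": [],
    "down": [],
}
# EndpointHandler.create_prompt(config) would return:
# "[BOS]<disease2diff><tissue>lung </tissue><age_individ>65 </age_individ><species>human </species>"
# Empty "up"/"down" lists are skipped entirely; any other empty field renders as "<k></k>".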
+     def generate_with_generation_config(self, input_ids, generation_config, max_new_tokens, random_seed=138):
+         torch.manual_seed(random_seed)
+
+         with torch.no_grad():
+             generation_output = self.model.generate(
+                 input_ids=input_ids,
+                 generation_config=generation_config,
+                 return_dict_in_generate=True,
+                 output_scores=True,
+                 max_new_tokens=max_new_tokens
+             )
+         return generation_output
+
+     def get_gene_probabilities(self, prompt_config, top_k=300, list_type='up', random_seed=138):
+         """
+         Args:
+             top_k: how many of the most probable tokens to take
+             list_type: "up" / "down"
+         """
+         prompt = self.create_prompt(prompt_config)
+         assert list_type in ["up", "down"]
+
+         if list_type == 'up':
+             prompt += "<up>"
+
+         print(prompt)
+         # Generation config: https://huggingface.co/blog/how-to-generate
+         generation_config = GenerationConfig(temperature=0.8, num_beams=1, do_sample=True, top_p=None, top_k=3550,
+                                              pad_token_id=self.tokenizer.pad_token_id, num_return_sequences=1)
+         inputs = self.tokenizer(prompt, return_tensors="pt")
+         input_ids = inputs["input_ids"].to(self.device)
+         assert 3 not in input_ids[0]  # guard: token id 3 must not appear in the encoded prompt
+         max_new_tokens = self.model.config.max_seq_len - len(input_ids[0])
+
+         generation_output = self.generate_with_generation_config(input_ids=input_ids,
+                                                                  generation_config=generation_config,
+                                                                  max_new_tokens=max_new_tokens,
+                                                                  random_seed=random_seed)
+         # find the position right after the <up>/<down> tag: this is where the gene prediction starts
+         id_4_gene_token = list(generation_output.sequences[0][len(input_ids[0]) - 1:]).index(
+             self.tokenizer.convert_tokens_to_ids([f'<{list_type}>'])[0])
+         id_4_gene_token += 1
+         print('This is the token index where the gene should be predicted: ', id_4_gene_token)
+
+         values, indices = torch.topk(generation_output["scores"][id_4_gene_token - 1].view(-1), k=top_k)
+         indices_decoded = self.tokenizer.decode(indices, skip_special_tokens=True)
+         indices_decoded_list = indices_decoded.split(' ')
+
+         generated_genes = sorted(set(indices_decoded_list) & set(self.unique_genes_p3), key=indices_decoded_list.index)
+         return generated_genes
+
+
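A minimal usage sketch for the class above, reusing the config dict from the earlier sketch (the device is an assumption, and instantiation downloads the model weights):

handler = EndpointHandler(device='cuda:0')
up_genes = handler.get_gene_probabilities(config, top_k=300, list_type='up')
print(up_genes[:10])  # gene symbols ranked by next-token probability after <up>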
+ class HFEndpointHandler:
+     def __init__(self, path="insilicomedicine/precious3-gpt", device='cuda:1'):
+         self.device = device
+         self.model = AutoModel.from_pretrained(path, trust_remote_code=True).to(self.device)
+         self.tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
+         self.model.config.pad_token_id = self.tokenizer.pad_token_id
+         self.model.config.bos_token_id = self.tokenizer.bos_token_id
+         self.model.config.eos_token_id = self.tokenizer.eos_token_id
+
+         unique_entities_p3 = pd.read_csv('https://huggingface.co/insilicomedicine/precious3-gpt/raw/main/all_entities_with_type.csv')
+         self.unique_compounds_p3 = [i.strip() for i in unique_entities_p3[unique_entities_p3.type == 'compound'].entity.to_list()]
+         self.unique_genes_p3 = [i.strip() for i in unique_entities_p3[unique_entities_p3.type == 'gene'].entity.to_list()]
+
+     def create_prompt(self, prompt_config):
+         prompt = "[BOS]"
+         multi_modal_prefix = ''
+
+         for k, v in prompt_config.items():
+             if k == 'instruction':
+                 prompt += f'<{v}>' if isinstance(v, str) else "".join([f'<{v_i}>' for v_i in v])
+             elif k in ('up', 'down'):
+                 if v:
+                     prompt += f'{multi_modal_prefix}<{k}>{v} </{k}>' if isinstance(v, str) \
+                         else f'{multi_modal_prefix}<{k}>{" ".join(v)} </{k}>'
+             elif k == 'age':
+                 if isinstance(v, int):
+                     if prompt_config['species'].strip() == 'human':
+                         prompt += f'<{k}_individ>{v} </{k}_individ>'
+                     elif prompt_config['species'].strip() == 'macaque':
+                         prompt += f'<{k}_individ>Macaca-{int(v / 20)} </{k}_individ>'
+             else:
+                 if v:
+                     prompt += f'<{k}>{v.strip()} </{k}>' if isinstance(v, str) else f'<{k}>{" ".join(v)} </{k}>'
+                 else:
+                     prompt += f'<{k}></{k}>'
+         return prompt
+
+     def custom_generate(self,
+                         input_ids,
+                         device,
+                         max_new_tokens,
+                         mode,
+                         temperature=0.8,
+                         top_p=0.2, top_k=3550,
+                         n_next_tokens=30, num_return_sequences=1, random_seed=138):
+         torch.manual_seed(random_seed)
+
+         # Parameters:
+         # temperature - higher values give more randomness, lower values more control
+         # top_p - probability threshold for nucleus (top-p) sampling
+         # top_k - ignore logits below the top-k value to reduce randomness (if non-zero)
+         # n_next_tokens - number of top next tokens to record when predicting compounds
+
+         # Generate sequences
+         outputs = []
+         next_token_compounds = []
+         next_token_up_genes = []
+         next_token_down_genes = []
+
+         for _ in range(num_return_sequences):
+             start_time = time.time()
+             generated_sequence = []
+             current_token = input_ids.clone()
+
+             for _ in range(max_new_tokens):  # maximum length of the generated sequence
+                 # Forward pass through the model
+                 logits = self.model.forward(
+                     input_ids=current_token
+                 )[0]
+
+                 # Apply temperature to logits
+                 if temperature != 1.0:
+                     logits = logits / temperature
+
+                 # Apply top-p (nucleus) sampling
+                 sorted_logits, sorted_indices = torch.sort(logits, descending=True)
+                 cumulative_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)
+                 sorted_indices_to_remove = cumulative_probs > top_p
+                 # shift the mask right so the first token above the threshold is kept
+                 sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
+                 sorted_indices_to_remove[..., 0] = False
+
+                 if top_k > 0:
+                     sorted_indices_to_remove[..., top_k:] = True
+
+                 # map the removal mask back from sorted order to vocabulary order
+                 # and drop the removed logits to -inf
+                 indices_to_remove = sorted_indices_to_remove.scatter(-1, sorted_indices, sorted_indices_to_remove)
+                 logits = logits.masked_fill(indices_to_remove, float("-inf"))
+
+                 # Record the top candidate tokens right after the <drug> tag
+                 if current_token[0][-1] == self.tokenizer.encode('<drug>')[0] and len(next_token_compounds) == 0:
+                     next_token_compounds.append(torch.topk(torch.softmax(logits, dim=-1)[0][len(current_token[0]) - 1, :].flatten(), n_next_tokens).indices)
+
+                 # Record the top candidate tokens for UP genes
+                 if current_token[0][-1] == self.tokenizer.encode('<up>')[0] and len(next_token_up_genes) == 0:
+                     next_token_up_genes.append(torch.topk(torch.softmax(logits, dim=-1)[0][len(current_token[0]) - 1, :].flatten(), n_next_tokens).indices)
+
+                 # Record the top candidate tokens for DOWN genes
+                 if current_token[0][-1] == self.tokenizer.encode('<down>')[0] and len(next_token_down_genes) == 0:
+                     next_token_down_genes.append(torch.topk(torch.softmax(logits, dim=-1)[0][len(current_token[0]) - 1, :].flatten(), n_next_tokens).indices)
+
+                 # Sample the next token
+                 next_token = torch.multinomial(torch.softmax(logits, dim=-1)[0], num_samples=1)[len(current_token[0]) - 1, :].unsqueeze(0)
+
+                 # Append the sampled token to the generated sequence
+                 generated_sequence.append(next_token.item())
+
+                 # Stop generation if an end token is generated
+                 if next_token == self.tokenizer.eos_token_id:
+                     break
+
+                 # Prepare input for the next iteration
+                 current_token = torch.cat((current_token, next_token), dim=-1)
+             print(time.time() - start_time)
+             outputs.append(generated_sequence)
+
+         # Process the generated up/down gene lists
+         processed_outputs = {"up": [], "down": []}
+         if mode in ['meta2diff', 'meta2diff2compound']:
+             predicted_up_genes_tokens = [self.tokenizer.convert_ids_to_tokens(j) for j in next_token_up_genes]
+             predicted_up_genes = []
+             for j in predicted_up_genes_tokens:
+                 generated_up_sample = [i.strip() for i in j]
+                 predicted_up_genes.append(sorted(set(generated_up_sample) & set(self.unique_genes_p3), key=generated_up_sample.index))
+             processed_outputs['up'] = predicted_up_genes
+
+             predicted_down_genes_tokens = [self.tokenizer.convert_ids_to_tokens(j) for j in next_token_down_genes]
+             predicted_down_genes = []
+             for j in predicted_down_genes_tokens:
+                 generated_down_sample = [i.strip() for i in j]
+                 predicted_down_genes.append(sorted(set(generated_down_sample) & set(self.unique_genes_p3), key=generated_down_sample.index))
+             processed_outputs['down'] = predicted_down_genes
+         else:
+             processed_outputs = outputs
+
+         predicted_compounds_ids = [self.tokenizer.convert_ids_to_tokens(j) for j in next_token_compounds]
+         predicted_compounds = []
+         for j in predicted_compounds_ids:
+             predicted_compounds.append([i.strip() for i in j])
+
+         return processed_outputs, predicted_compounds, random_seed
+
+     def __call__(self, data: Dict[str, Any]) -> Dict[str, str]:
+         """
+         Args:
+             data (:dict:):
+                 The payload with the text prompt and generation parameters.
+         """
+         data = data.copy()
+         parameters = data.pop("parameters", None)
+         config_data = data.pop("inputs", None)
+         mode = data.pop('mode', 'Not specified')
+
+         prompt = self.create_prompt(config_data)
+         if mode != "diff2compound":
+             prompt += "<up>"
+
+         inputs = self.tokenizer(prompt, return_tensors="pt")
+         input_ids = inputs["input_ids"].to(self.device)
+
+         max_new_tokens = self.model.config.max_seq_len - len(input_ids[0])
+         try:
+             generated_sequence, raw_next_token_generation, out_seed = self.custom_generate(
+                 input_ids=input_ids, max_new_tokens=max_new_tokens, mode=mode,
+                 device=self.device, **(parameters or {}))
+             next_token_generation = [sorted(set(i) & set(self.unique_compounds_p3), key=i.index) for i in raw_next_token_generation]
+
+             if mode == "meta2diff":
+                 outputs = {"up": generated_sequence['up'], "down": generated_sequence['down']}
+                 out = {"output": outputs, "mode": mode, "message": "Done!", "input": prompt, 'random_seed': out_seed}
+             elif mode == "meta2diff2compound":
+                 outputs = {"up": generated_sequence['up'], "down": generated_sequence['down']}
+                 out = {
+                     "output": outputs, "compounds": next_token_generation, "raw_output": raw_next_token_generation, "mode": mode,
+                     "message": "Done!", "input": prompt, 'random_seed': out_seed}
+             elif mode == "diff2compound":
+                 outputs = generated_sequence
+                 out = {
+                     "output": outputs, "compounds": next_token_generation, "raw_output": raw_next_token_generation, "mode": mode,
+                     "message": "Done!", "input": prompt, 'random_seed': out_seed}
+             else:
+                 out = {"message": f"Specify one of the following modes: meta2diff, meta2diff2compound, diff2compound. Your mode is: {mode}"}
+
+         except Exception as e:
+             print(e)
+             outputs, next_token_generation = [None], [None]
+             out = {"output": outputs, "mode": mode, 'message': f"{e}", "input": prompt, 'random_seed': 138}
+
+         return out
+
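Since __call__ routes the payload into custom_generate, a request for this handler can look like the following sketch (the inputs dict is illustrative; the parameter names come from the custom_generate signature):

payload = {
    "inputs": {"instruction": "disease2diff", "tissue": "lung", "age": 65,
               "species": "human", "up": [], "down": []},
    "mode": "meta2diff",
    "parameters": {"temperature": 0.8, "top_p": 0.2, "top_k": 3550,
                   "n_next_tokens": 30, "random_seed": 138},
}
result = HFEndpointHandler()(payload)
# result: {"output": {"up": [...], "down": [...]}, "mode": "meta2diff", "message": "Done!", ...}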
+ class MMEndpointHandler:
+     def __init__(self, path="insilicomedicine/precious3-gpt-multi-modal", device='cuda:3'):
+         self.device = device
+         self.path = path
+         # load the model and tokenizer from the path
+         self.model = Custom_MPTForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16).to(self.device)
+         self.tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
+         self.model.config.pad_token_id = self.tokenizer.pad_token_id
+         self.model.config.bos_token_id = self.tokenizer.bos_token_id
+         self.model.config.eos_token_id = self.tokenizer.eos_token_id
+         unique_entities_p3 = pd.read_csv('https://huggingface.co/insilicomedicine/precious3-gpt/raw/main/all_entities_with_type.csv')
+         self.unique_compounds_p3 = [i.strip() for i in unique_entities_p3[unique_entities_p3.type == 'compound'].entity.to_list()]
+         self.unique_genes_p3 = [i.strip() for i in unique_entities_p3[unique_entities_p3.type == 'gene'].entity.to_list()]
+
+         self.emb_gpt_genes = pd.read_pickle('https://huggingface.co/insilicomedicine/precious3-gpt-multi-modal/resolve/main/multi-modal-data/emb_gpt_genes.pickle')
+         self.emb_hgt_genes = pd.read_pickle('https://huggingface.co/insilicomedicine/precious3-gpt-multi-modal/resolve/main/multi-modal-data/emb_hgt_genes.pickle')
+
+     def create_prompt(self, prompt_config):
+         prompt = "[BOS]"
+         multi_modal_prefix = '<modality0><modality1><modality2><modality3>' * 3
+
+         for k, v in prompt_config.items():
+             if k == 'instruction':
+                 prompt += f'<{v}>' if isinstance(v, str) else "".join([f'<{v_i}>' for v_i in v])
+             elif k in ('up', 'down'):
+                 if v:
+                     prompt += f'{multi_modal_prefix}<{k}>{v} </{k}>' if isinstance(v, str) \
+                         else f'{multi_modal_prefix}<{k}>{" ".join(v)} </{k}>'
+             elif k == 'age':
+                 if isinstance(v, int):
+                     if prompt_config['species'].strip() == 'human':
+                         prompt += f'<{k}_individ>{v} </{k}_individ>'
+                     elif prompt_config['species'].strip() == 'macaque':
+                         prompt += f'<{k}_individ>Macaca-{int(v / 20)} </{k}_individ>'
+             else:
+                 if v:
+                     prompt += f'<{k}>{v.strip()} </{k}>' if isinstance(v, str) else f'<{k}>{" ".join(v)} </{k}>'
+                 else:
+                     prompt += f'<{k}></{k}>'
+         return prompt
+
+     def custom_generate(self,
+                         input_ids,
+                         acc_embs_up_kg_mean,
+                         acc_embs_down_kg_mean,
+                         acc_embs_up_txt_mean,
+                         acc_embs_down_txt_mean,
+                         device,
+                         max_new_tokens,
+                         mode,
+                         temperature=0.8,
+                         top_p=0.2, top_k=3550,
+                         n_next_tokens=50, num_return_sequences=1, random_seed=138):
+         torch.manual_seed(random_seed)
+
+         # Parameters:
+         # temperature - higher values give more randomness, lower values more control
+         # top_p - probability threshold for nucleus (top-p) sampling
+         # top_k - ignore logits below the top-k value to reduce randomness (if non-zero)
+         # n_next_tokens - number of top next tokens to record when predicting compounds
+
+         modality0_emb = torch.unsqueeze(torch.from_numpy(acc_embs_up_kg_mean), 0).to(device) if isinstance(acc_embs_up_kg_mean, np.ndarray) else None
+         modality1_emb = torch.unsqueeze(torch.from_numpy(acc_embs_down_kg_mean), 0).to(device) if isinstance(acc_embs_down_kg_mean, np.ndarray) else None
+         modality2_emb = torch.unsqueeze(torch.from_numpy(acc_embs_up_txt_mean), 0).to(device) if isinstance(acc_embs_up_txt_mean, np.ndarray) else None
+         modality3_emb = torch.unsqueeze(torch.from_numpy(acc_embs_down_txt_mean), 0).to(device) if isinstance(acc_embs_down_txt_mean, np.ndarray) else None
+
+         # Generate sequences
+         outputs = []
+         next_token_compounds = []
+         next_token_up_genes = []
+         next_token_down_genes = []
+
+         for _ in range(num_return_sequences):
+             start_time = time.time()
+             generated_sequence = []
+             current_token = input_ids.clone()
+
+             for _ in range(max_new_tokens):  # maximum length of the generated sequence
+                 # Forward pass through the model
+                 logits = self.model.forward(
+                     input_ids=current_token,
+                     modality0_emb=modality0_emb,
+                     modality0_token_id=self.tokenizer.encode('<modality0>')[0],  # 62191
+                     modality1_emb=modality1_emb,
+                     modality1_token_id=self.tokenizer.encode('<modality1>')[0],  # 62192
+                     modality2_emb=modality2_emb,
+                     modality2_token_id=self.tokenizer.encode('<modality2>')[0],  # 62193
+                     modality3_emb=modality3_emb,
+                     modality3_token_id=self.tokenizer.encode('<modality3>')[0],  # 62194
+                 )[0]
+
+                 # Apply temperature to logits
+                 if temperature != 1.0:
+                     logits = logits / temperature
+
+                 # Apply top-p (nucleus) sampling
+                 sorted_logits, sorted_indices = torch.sort(logits, descending=True)
+                 cumulative_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)
+                 sorted_indices_to_remove = cumulative_probs > top_p
+                 # shift the mask right so the first token above the threshold is kept
+                 sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
+                 sorted_indices_to_remove[..., 0] = False
+
+                 if top_k > 0:
+                     sorted_indices_to_remove[..., top_k:] = True
+
+                 # map the removal mask back from sorted order to vocabulary order
+                 # and drop the removed logits to -inf
+                 indices_to_remove = sorted_indices_to_remove.scatter(-1, sorted_indices, sorted_indices_to_remove)
+                 logits = logits.masked_fill(indices_to_remove, float("-inf"))
+
+                 # Record the top candidate tokens right after the <drug> tag
+                 if current_token[0][-1] == self.tokenizer.encode('<drug>')[0] and len(next_token_compounds) == 0:
+                     next_token_compounds.append(torch.topk(torch.softmax(logits, dim=-1)[0][len(current_token[0]) - 1, :].flatten(), n_next_tokens).indices)
+
+                 # Record the top candidate tokens for UP genes
+                 if current_token[0][-1] == self.tokenizer.encode('<up>')[0] and len(next_token_up_genes) == 0:
+                     next_token_up_genes.append(torch.topk(torch.softmax(logits, dim=-1)[0][len(current_token[0]) - 1, :].flatten(), n_next_tokens).indices)
+
+                 # Record the top candidate tokens for DOWN genes
+                 if current_token[0][-1] == self.tokenizer.encode('<down>')[0] and len(next_token_down_genes) == 0:
+                     next_token_down_genes.append(torch.topk(torch.softmax(logits, dim=-1)[0][len(current_token[0]) - 1, :].flatten(), n_next_tokens).indices)
+
+                 # Sample the next token
+                 next_token = torch.multinomial(torch.softmax(logits, dim=-1)[0], num_samples=1)[len(current_token[0]) - 1, :].unsqueeze(0)
+
+                 # Append the sampled token to the generated sequence
+                 generated_sequence.append(next_token.item())
+
+                 # Stop generation if an end token is generated
+                 if next_token == self.tokenizer.eos_token_id:
+                     break
+
+                 # Prepare input for the next iteration
+                 current_token = torch.cat((current_token, next_token), dim=-1)
+             print(time.time() - start_time)
+             outputs.append(generated_sequence)
+
+         # Process the generated up/down gene lists
+         processed_outputs = {"up": [], "down": []}
+         if mode in ['meta2diff', 'meta2diff2compound']:
+             predicted_up_genes_tokens = [self.tokenizer.convert_ids_to_tokens(j) for j in next_token_up_genes]
+             predicted_up_genes = []
+             for j in predicted_up_genes_tokens:
+                 generated_up_sample = [i.strip() for i in j]
+                 predicted_up_genes.append(sorted(set(generated_up_sample) & set(self.unique_genes_p3), key=generated_up_sample.index))
+             processed_outputs['up'] = predicted_up_genes
+
+             predicted_down_genes_tokens = [self.tokenizer.convert_ids_to_tokens(j) for j in next_token_down_genes]
+             predicted_down_genes = []
+             for j in predicted_down_genes_tokens:
+                 generated_down_sample = [i.strip() for i in j]
+                 predicted_down_genes.append(sorted(set(generated_down_sample) & set(self.unique_genes_p3), key=generated_down_sample.index))
+             processed_outputs['down'] = predicted_down_genes
+         else:
+             processed_outputs = outputs
+
+         predicted_compounds_ids = [self.tokenizer.convert_ids_to_tokens(j) for j in next_token_compounds]
+         predicted_compounds = []
+         for j in predicted_compounds_ids:
+             predicted_compounds.append([i.strip() for i in j])
+
+         return processed_outputs, predicted_compounds, random_seed
+
+     def __call__(self, data: Dict[str, Any]) -> Dict[str, str]:
+         """
+         Args:
+             data (:dict:):
+                 The payload with the text prompt and generation parameters.
+         """
+         data = data.copy()
+         parameters = data.pop("parameters", None)
+         config_data = data.pop("inputs", None)
+         mode = data.pop('mode', 'Not specified')
+
+         prompt = self.create_prompt(config_data)
+         if mode != "diff2compound":
+             prompt += "<up>"
+
+         inputs = self.tokenizer(prompt, return_tensors="pt")
+         input_ids = inputs["input_ids"].to(self.device)
+
+         max_new_tokens = self.model.config.max_seq_len - len(input_ids[0])
+         try:
+             if set(["up", "down"]) & set(config_data.keys()):
+                 acc_embs_up1 = []
+                 acc_embs_up2 = []
+                 for gs in config_data.get('up', []):
+                     try:
+                         acc_embs_up1.append(self.emb_hgt_genes[self.emb_hgt_genes.gene_symbol == gs].embs.values[0])
+                         acc_embs_up2.append(self.emb_gpt_genes[self.emb_gpt_genes.gene_symbol == gs].embs.values[0])
+                     except Exception:
+                         pass  # skip genes missing from the embedding tables
+                 acc_embs_up1_mean = np.array(acc_embs_up1).mean(0) if acc_embs_up1 else None
+                 acc_embs_up2_mean = np.array(acc_embs_up2).mean(0) if acc_embs_up2 else None
+
+                 acc_embs_down1 = []
+                 acc_embs_down2 = []
+                 for gs in config_data.get('down', []):
+                     try:
+                         acc_embs_down1.append(self.emb_hgt_genes[self.emb_hgt_genes.gene_symbol == gs].embs.values[0])
+                         acc_embs_down2.append(self.emb_gpt_genes[self.emb_gpt_genes.gene_symbol == gs].embs.values[0])
+                     except Exception:
+                         pass  # skip genes missing from the embedding tables
+                 acc_embs_down1_mean = np.array(acc_embs_down1).mean(0) if acc_embs_down1 else None
+                 acc_embs_down2_mean = np.array(acc_embs_down2).mean(0) if acc_embs_down2 else None
+             else:
+                 acc_embs_up1_mean, acc_embs_up2_mean, acc_embs_down1_mean, acc_embs_down2_mean = None, None, None, None
+
+             generated_sequence, raw_next_token_generation, out_seed = self.custom_generate(
+                 input_ids=input_ids,
+                 acc_embs_up_kg_mean=acc_embs_up1_mean,
+                 acc_embs_down_kg_mean=acc_embs_down1_mean,
+                 acc_embs_up_txt_mean=acc_embs_up2_mean,
+                 acc_embs_down_txt_mean=acc_embs_down2_mean,
+                 max_new_tokens=max_new_tokens, mode=mode,
+                 device=self.device, **(parameters or {}))
+             next_token_generation = [sorted(set(i) & set(self.unique_compounds_p3), key=i.index) for i in raw_next_token_generation]
+
+             if mode == "meta2diff":
+                 outputs = {"up": generated_sequence['up'], "down": generated_sequence['down']}
+                 out = {"output": outputs, "mode": mode, "message": "Done!", "input": prompt, 'random_seed': out_seed}
+             elif mode == "meta2diff2compound":
+                 outputs = {"up": generated_sequence['up'], "down": generated_sequence['down']}
+                 out = {
+                     "output": outputs, "compounds": next_token_generation, "raw_output": raw_next_token_generation, "mode": mode,
+                     "message": "Done!", "input": prompt, 'random_seed': out_seed}
+             elif mode == "diff2compound":
+                 outputs = generated_sequence
+                 out = {
+                     "output": outputs, "compounds": next_token_generation, "raw_output": raw_next_token_generation, "mode": mode,
+                     "message": "Done!", "input": prompt, 'random_seed': out_seed}
+             else:
+                 out = {"message": f"Specify one of the following modes: meta2diff, meta2diff2compound, diff2compound. Your mode is: {mode}"}
+
+         except Exception as e:
+             print(e)
+             outputs, next_token_generation = [None], [None]
+             out = {"output": outputs, "mode": mode, 'message': f"{e}", "input": prompt, 'random_seed': 138}
+
+         return out
+
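The multi-modal handler accepts the same payload shape; when the inputs carry "up"/"down" gene lists, their per-gene embeddings are averaged and fed in as the four modality embeddings. A sketch with illustrative gene symbols:

mm_payload = {
    "inputs": {"instruction": "diff2compound", "species": "human",
               "up": ["TP53", "EGFR"], "down": ["MYC"]},
    "mode": "diff2compound",
    "parameters": {"temperature": 0.8, "top_p": 0.2, "n_next_tokens": 50},
}
result = MMEndpointHandler()(mm_payload)  # "compounds" holds the ranked compound candidates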
+ def main():
+     pass
+
+
+ if __name__ == "__main__":
+     main()
demo/P3LIB/formula_picker.py ADDED
@@ -0,0 +1,549 @@
+ import pandas as pd
+ import pickle
+ from typing import List, Dict, Optional
+ from copy import copy as cp
+ import json
+
+ from abc import ABC, abstractmethod
+
+
+ class TCMEntity(ABC):
+     empty_override = True
+     desc = ''
+     cid = -1
+     entity = 'superclass'
+
+     def __init__(self,
+                  pref_name: str, desc: str = '',
+                  synonyms: Optional[List[str]] = None,
+                  **kwargs):
+         self.pref_name = pref_name
+         self.desc = desc
+         self.synonyms = [] if synonyms is None else [x for x in synonyms if str(x).strip() != 'NA']
+
+         self.targets = {"known": dict(), "predicted": dict()}
+
+         self.formulas = []
+         self.herbs = []
+         self.ingrs = []
+
+         for k, v in kwargs.items():
+             self.__dict__[k] = v
+
+     def serialize(self):
+         init_dict = dict(
+             cid=self.cid,
+             targets_known=self.targets['known'],
+             targets_pred=self.targets['predicted'],
+             pref_name=self.pref_name, desc=self.desc,
+             synonyms=cp(self.synonyms),
+             entity=self.entity
+         )
+         link_dict = self._get_link_dict()
+         out_dict = {"init": init_dict, "links": link_dict}
+         return out_dict
+
+     @classmethod
+     def load(cls,
+              db: 'TCMDB', ser_dict: dict,
+              skip_links=True):
+         init_args = ser_dict['init']
+         init_args.update({"empty_override": skip_links})
+
+         new_entity = cls(**init_args)
+         if not skip_links:
+             links = ser_dict['links']
+             new_entity._set_links(db, links)
+         return new_entity
+
+     def _get_link_dict(self):
+         return dict(
+             ingrs=[x.cid for x in self.ingrs],
+             herbs=[x.pref_name for x in self.herbs],
+             formulas=[x.pref_name for x in self.formulas]
+         )
+
+     def _set_links(self, db: 'TCMDB', links: dict):
+         for ent_type in links:
+             self.__dict__[ent_type] = [db.__dict__[ent_type].get(x) for x in links[ent_type]]
+             self.__dict__[ent_type] = [x for x in self.__dict__[ent_type] if x is not None]
+
+
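For reference, serialize() emits one dict per entity and load() consumes the same shape; a sketch with illustrative values (curcumin's CID is taken from main() below; the herb name is made up):

{
    "init": {"cid": 969516, "pref_name": "curcumin", "desc": "", "synonyms": [],
             "targets_known": {"symbols": [], "entrez_ids": []},
             "targets_pred": {"symbols": [], "entrez_ids": []},
             "entity": "ingredient"},
    "links": {"ingrs": [], "herbs": ["JIANG HUANG"], "formulas": []},
}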
+ class Ingredient(TCMEntity):
+     entity: str = 'ingredient'
+
+     def __init__(self, cid: int,
+                  targets_pred: Optional[Dict] = None,
+                  targets_known: Optional[Dict] = None,
+                  synonyms: Optional[List[str]] = None,
+                  pref_name: str = '', desc: str = '',
+                  empty_override: bool = True, **kwargs):
+         if not empty_override:
+             assert targets_known is not None or targets_pred is not None, \
+                 f"Can't submit a compound with no targets at all (CID:{cid})"
+
+         super().__init__(pref_name, desc=desc, synonyms=synonyms, **kwargs)
+
+         self.cid = cid
+         self.targets = {
+             'known': targets_known if targets_known is not None else {"symbols": [], 'entrez_ids': []},
+             'predicted': targets_pred if targets_pred is not None else {"symbols": [], 'entrez_ids': []}
+         }
+
+
+ class Herb(TCMEntity):
+     entity: str = 'herb'
+
+     def __init__(self, pref_name: str,
+                  ingrs: Optional[List[Ingredient]] = None,
+                  synonyms: Optional[List[str]] = None,
+                  desc: str = '',
+                  empty_override: bool = True, **kwargs):
+         if ingrs is None:
+             ingrs = []
+
+         if not ingrs and not empty_override:
+             raise ValueError(f"No ingredients provided for {pref_name}")
+
+         super().__init__(pref_name, desc=desc, synonyms=synonyms, **kwargs)
+
+         self.ingrs = ingrs
+
+     def is_same(self, other: 'Herb') -> bool:
+         if len(self.ingrs) != len(other.ingrs):
+             return False
+         this_ingrs = set(x.cid for x in self.ingrs)
+         other_ingrs = set(x.cid for x in other.ingrs)
+         return this_ingrs == other_ingrs
+
+
+ class Formula(TCMEntity):
+     entity: str = 'formula'
+
+     def __init__(self, pref_name: str,
+                  herbs: Optional[List[Herb]] = None,
+                  synonyms: Optional[List[str]] = None,
+                  desc: str = '',
+                  empty_override: bool = False, **kwargs):
+         if herbs is None:
+             herbs = []
+
+         if not herbs and not empty_override:
+             raise ValueError(f"No herbs provided for {pref_name}")
+
+         super().__init__(pref_name, desc=desc, synonyms=synonyms, **kwargs)
+         self.herbs = herbs
+
+     def is_same(self, other: 'Formula') -> bool:
+         if len(self.herbs) != len(other.herbs):
+             return False
+         this_herbs = set(x.pref_name for x in self.herbs)
+         other_herbs = set(x.pref_name for x in other.herbs)
+         return this_herbs == other_herbs
+
+
+ class TCMDB:
+     hf_repo: str = "f-galkin/batman2"
+     hf_subsets: Dict[str, str] = {'formulas': 'batman_formulas',
+                                   'herbs': 'batman_herbs',
+                                   'ingredients': 'batman_ingredients'}
+
+     def __init__(self, p_batman: str):
+         p_batman = p_batman.removesuffix("/") + "/"
+
+         self.batman_files = dict(p_formulas='formula_browse.txt',
+                                  p_herbs='herb_browse.txt',
+                                  p_pred_by_tg='predicted_browse_by_targets.txt',
+                                  p_known_by_tg='known_browse_by_targets.txt',
+                                  p_pred_by_ingr='predicted_browse_by_ingredinets.txt',
+                                  p_known_by_ingr='known_browse_by_ingredients.txt')
+
+         self.batman_files = {x: p_batman + y for x, y in self.batman_files.items()}
+
+         self.ingrs = None
+         self.herbs = None
+         self.formulas = None
+
+     @classmethod
+     def make_new_db(cls, p_batman: str):
+         new_db = cls(p_batman)
+
+         new_db.parse_ingredients()
+         new_db.parse_herbs()
+         new_db.parse_formulas()
+
+         return new_db
+
+     def parse_ingredients(self):
+         pred_tgs = pd.read_csv(self.batman_files['p_pred_by_tg'],
+                                sep='\t', index_col=None, header=0,
+                                na_filter=False)
+         known_tgs = pd.read_csv(self.batman_files['p_known_by_tg'],
+                                 sep='\t', index_col=None, header=0,
+                                 na_filter=False)
+         # 9927 gene targets
+         entrez_to_symb = {int(pred_tgs.loc[x, 'entrez_gene_id']): pred_tgs.loc[x, 'entrez_gene_symbol'] for x in
+                           pred_tgs.index}
+         entrez_to_symb.update({int(known_tgs.loc[x, 'entrez_gene_id']):
+                                    known_tgs.loc[x, 'entrez_gene_symbol'] for x in known_tgs.index})
+
+         known_ingreds = pd.read_csv(self.batman_files['p_known_by_ingr'],
+                                     index_col=0, header=0, sep='\t',
+                                     na_filter=False)
+         # this BATMAN table is badly formatted and cannot be read with pd.read_csv directly
+         pred_ingreds = dict()
+         with open(self.batman_files['p_pred_by_ingr'], 'r') as f:
+             # skip the header
+             f.readline()
+             newline = f.readline()
+             while newline != '':
+                 cid, other_line = newline.split(' ', 1)
+                 name, entrez_ids = other_line.rsplit(' ', 1)
+                 entrez_ids = [int(x.split("(")[0]) for x in entrez_ids.split("|") if not x == "\n"]
+                 pred_ingreds[int(cid)] = {"targets": entrez_ids, 'name': name}
+                 newline = f.readline()
+
+         all_BATMAN_CIDs = list(set(pred_ingreds.keys()) | set(known_ingreds.index))
+         all_BATMAN_CIDs = [int(x) for x in all_BATMAN_CIDs if str(x).strip() != 'NA']
+
+         # get targets for the selected compounds
+         ingredients = dict()
+         for cid in all_BATMAN_CIDs:
+             known_name, pred_name, synonyms = None, None, []
+             if cid in known_ingreds.index:
+                 known_name = known_ingreds.loc[cid, 'IUPAC_name']
+                 known_symbs = known_ingreds.loc[cid, 'known_target_proteins'].split("|")
+             else:
+                 known_symbs = []
+
+             pred_ids = pred_ingreds.get(cid, [])
+             cpd_name = known_name
+             if pred_ids:
+                 pred_name = pred_ids.get('name')
+                 if known_name is None:
+                     cpd_name = pred_name
+                 elif known_name != pred_name:
+                     # prefer the name with fewer IUPAC-style decorations
+                     cpd_name = min([known_name, pred_name], key=lambda x: sum([x.count(y) for y in "'()-[]1234567890"]))
+                     synonyms = [x for x in [known_name, pred_name] if x != cpd_name]
+
+                 pred_ids = pred_ids.get('targets', [])
+
+             ingredients[cid] = dict(pref_name=cpd_name,
+                                     synonyms=synonyms,
+                                     targets_known={"symbols": known_symbs,
+                                                    "entrez_ids": [int(x) for x, y in entrez_to_symb.items() if
+                                                                   y in known_symbs]},
+                                     targets_pred={"symbols": [entrez_to_symb.get(x) for x in pred_ids],
+                                                   "entrez_ids": pred_ids})
+         ingredients_objs = {x: Ingredient(cid=x, **y) for x, y in ingredients.items()}
+         self.ingrs = ingredients_objs
+
+     def parse_herbs(self):
+         if self.ingrs is None:
+             raise ValueError("Herbs cannot be added before the ingredients")
+         # load the herbs file
+         name_cols = ['Pinyin.Name', 'Chinese.Name', 'English.Name', 'Latin.Name']
+         herbs_df = pd.read_csv(self.batman_files['p_herbs'],
+                                index_col=None, header=0, sep='\t',
+                                na_filter=False)
+         for i in herbs_df.index:
+             herb_name = herbs_df.loc[i, 'Pinyin.Name'].strip()
+             if herb_name == 'NA':
+                 herb_name = [x.strip() for x in herbs_df.loc[i, name_cols].tolist() if not x == 'NA']
+                 herb_name = [x for x in herb_name if x != '']
+                 if not herb_name:
+                     raise ValueError(f"LINE {i}: provided a herb with no names")
+                 else:
+                     herb_name = herb_name[-1]
+
+             herb_cids = herbs_df.loc[i, 'Ingredients'].split("|")
+             herb_cids = [x.split("(")[-1].removesuffix(")").strip() for x in herb_cids]
+             herb_cids = [int(x) for x in herb_cids if x.isnumeric()]
+
+             missed_ingrs = [x for x in herb_cids if self.ingrs.get(x) is None]
+             for cid in missed_ingrs:
+                 self.add_ingredient(cid=int(cid), pref_name='',
+                                     empty_override=True)
+             herb_ingrs = [self.ingrs[int(x)] for x in herb_cids]
+
+             self.add_herb(pref_name=herb_name,
+                           ingrs=herb_ingrs,
+                           synonyms=[x for x in herbs_df.loc[i, name_cols].tolist() if not x == "NA"],
+                           empty_override=True)
+
+     def parse_formulas(self):
+         if self.herbs is None:
+             raise ValueError("Formulas cannot be added before the herbs")
+         formulas_df = pd.read_csv(self.batman_files['p_formulas'], index_col=None, header=0,
+                                   sep='\t', na_filter=False)
+         for i in formulas_df.index:
+             composition = formulas_df.loc[i, 'Pinyin.composition'].split(",")
+             composition = [x.strip() for x in composition if not x.strip() == 'NA']
+             if not composition:
+                 continue
+
+             missed_herbs = [x.strip() for x in composition if self.herbs.get(x) is None]
+             for herb in missed_herbs:
+                 self.add_herb(pref_name=herb,
+                               desc='Missing in the original herb catalog, but present among formula components',
+                               ingrs=[], empty_override=True)
+
+             formula_herbs = [self.herbs[x] for x in composition]
+             self.add_formula(pref_name=formulas_df.loc[i, 'Pinyin.Name'].strip(),
+                              synonyms=[formulas_df.loc[i, 'Chinese.Name']],
+                              herbs=formula_herbs)
+
+     def add_ingredient(self, **kwargs):
+         if self.ingrs is None:
+             self.ingrs = dict()
+
+         new_ingr = Ingredient(**kwargs)
+         if new_ingr.cid not in self.ingrs:
+             self.ingrs.update({new_ingr.cid: new_ingr})
+
+     def add_herb(self, **kwargs):
+         if self.herbs is None:
+             self.herbs = dict()
+
+         new_herb = Herb(**kwargs)
+         old_herb = self.herbs.get(new_herb.pref_name)
+         if old_herb is not None:
+             if new_herb.is_same(old_herb):
+                 return
+
+             # rename duplicated herbs to <name>~1, <name>~2, ...
+             same_name = new_herb.pref_name
+             all_dupes = [self.herbs[x] for x in self.herbs if x.split('~')[0] == same_name] + [new_herb]
+             new_names = [same_name + f"~{x + 1}" for x in range(len(all_dupes))]
+             for i, duped in enumerate(all_dupes):
+                 duped.pref_name = new_names[i]
+             self.herbs.pop(same_name)
+             self.herbs.update({x.pref_name: x for x in all_dupes})
+         else:
+             self.herbs.update({new_herb.pref_name: new_herb})
+
+         for cpd in new_herb.ingrs:
+             cpd_herbs = [x.pref_name for x in cpd.herbs]
+             if new_herb.pref_name not in cpd_herbs:
+                 cpd.herbs.append(new_herb)
+
+     def add_formula(self, **kwargs):
+         if self.formulas is None:
+             self.formulas = dict()
+
+         new_formula = Formula(**kwargs)
+         old_formula = self.formulas.get(new_formula.pref_name)
+         if old_formula is not None:
+             if new_formula.is_same(old_formula):
+                 return
+
+             # rename duplicated formulas to <name>~1, <name>~2, ...
+             same_name = new_formula.pref_name
+             all_dupes = [self.formulas[x] for x in self.formulas if x.split('~')[0] == same_name] + [new_formula]
+             new_names = [same_name + f"~{x + 1}" for x in range(len(all_dupes))]
+             for i, duped in enumerate(all_dupes):
+                 duped.pref_name = new_names[i]
+             self.formulas.pop(same_name)
+             self.formulas.update({x.pref_name: x for x in all_dupes})
+         else:
+             self.formulas.update({new_formula.pref_name: new_formula})
+
+         for herb in new_formula.herbs:
+             herb_formulas = [x.pref_name for x in herb.formulas]
+             if new_formula.pref_name not in herb_formulas:
+                 herb.formulas.append(new_formula)
+
+     def link_ingredients_n_formulas(self):
+         for h in self.herbs.values():
+             for i in h.ingrs:
+                 fla_names = set(x.pref_name for x in i.formulas)
+                 i.formulas += [x for x in h.formulas if x.pref_name not in fla_names]
+             for f in h.formulas:
+                 ingr_cids = set(x.cid for x in f.ingrs)
+                 f.ingrs += [x for x in h.ingrs if x.cid not in ingr_cids]
+
+     def serialize(self):
+         out_dict = dict(
+             ingredients={cid: ingr.serialize() for cid, ingr in self.ingrs.items()},
+             herbs={name: herb.serialize() for name, herb in self.herbs.items()},
+             formulas={name: formula.serialize() for name, formula in self.formulas.items()}
+         )
+         return out_dict
+
+     def save_to_flat_json(self, p_out: str):
+         ser_db = self.serialize()
+         flat_db = dict()
+         for ent_type in ser_db:
+             for i, obj in ser_db[ent_type].items():
+                 flat_db[f"{ent_type}:{i}"] = obj
+         with open(p_out, "w") as f:
+             f.write(json.dumps(flat_db))
+
+     def save_to_json(self, p_out: str):
+         with open(p_out, "w") as f:
+             json.dump(self.serialize(), f)
+
+     @classmethod
+     def load(cls, ser_dict: dict):
+         db = cls(p_batman="")
+
+         # make sure to create all entities before linking them together
+         db.ingrs = {int(cid): Ingredient.load(db, ingr, skip_links=True) for cid, ingr in
+                     ser_dict['ingredients'].items()}
+         db.herbs = {name: Herb.load(db, herb, skip_links=True) for name, herb in ser_dict['herbs'].items()}
+         db.formulas = {name: Formula.load(db, formula, skip_links=True) for name, formula in
+                        ser_dict['formulas'].items()}
+
+         # now set the links
+         for i in db.ingrs.values():
+             # NB: ideally this should work without relying on str-int conversion
+             i._set_links(db, ser_dict['ingredients'][str(i.cid)]['links'])
+         for h in db.herbs.values():
+             h._set_links(db, ser_dict['herbs'][h.pref_name]['links'])
+         for f in db.formulas.values():
+             f._set_links(db, ser_dict['formulas'][f.pref_name]['links'])
+         return db
+
+     @classmethod
+     def read_from_json(cls, p_file: str):
+         with open(p_file, "r") as f:
+             json_db = json.load(f)
+         db = cls.load(json_db)
+         return db
+
+     @classmethod
+     def download_from_hf(cls):
+         from datasets import load_dataset
+         dsets = {x: load_dataset(cls.hf_repo, y) for x, y in cls.hf_subsets.items()}
+
+         # TODO: speed this up
+         known_tgs = {str(x['cid']): [y.split("(") for y in eval(x['targets_known'])] for x in dsets['ingredients']['train']}
+         known_tgs = {x: {'symbols': [z[0] for z in y], "entrez_ids": [int(z[1].strip(")")) for z in y]} for x, y in known_tgs.items()}
+         pred_tgs = {str(x['cid']): [y.split("(") for y in eval(x['targets_pred'])] for x in dsets['ingredients']['train']}
+         pred_tgs = {x: {'symbols': [z[0] for z in y], "entrez_ids": [int(z[1].strip(")")) for z in y]} for x, y in pred_tgs.items()}
+
+         json_db = dict()
+         json_db['ingredients'] = {str(x['cid']): {'init': dict(cid=int(x['cid']),
+                                                                targets_known=known_tgs[str(x['cid'])],
+                                                                targets_pred=pred_tgs[str(x['cid'])],
+                                                                pref_name=x['pref_name'],
+                                                                synonyms=eval(x['synonyms']),
+                                                                desc=x['description']),
+                                                   'links': dict(herbs=eval(x['herbs']),
+                                                                 formulas=eval(x['formulas']))}
+                                   for x in dsets['ingredients']['train']}
+
+         json_db['herbs'] = {x['pref_name']: {'init': dict(pref_name=x['pref_name'],
+                                                           synonyms=eval(x['synonyms']),
+                                                           desc=x['description']),
+                                              'links': dict(ingrs=eval(x['ingredients']),
+                                                            formulas=eval(x['formulas']))} for x in
+                             dsets['herbs']['train']}
+
+         json_db['formulas'] = {x['pref_name']: {'init': dict(pref_name=x['pref_name'],
+                                                              synonyms=eval(x['synonyms']),
+                                                              desc=x['description']),
+                                                 'links': dict(ingrs=eval(x['ingredients']),
+                                                               herbs=eval(x['herbs']))} for x in
+                                dsets['formulas']['train']}
+
+         db = cls.load(json_db)
+         return db
+
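A sketch of loading the prebuilt database from the Hugging Face Hub instead of re-parsing the raw BATMAN files:

db = TCMDB.download_from_hf()  # pulls the batman_* subsets of f-galkin/batman2
db.drop_isolated(how='any')    # optional: keep only entities linked on every level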
+     def drop_isolated(self, how='any'):
+         match how:
+             case 'any':
+                 self.herbs = {x: y for x, y in self.herbs.items() if (y.ingrs and y.formulas)}
+                 self.formulas = {x: y for x, y in self.formulas.items() if (y.ingrs and y.herbs)}
+                 self.ingrs = {x: y for x, y in self.ingrs.items() if (y.formulas and y.herbs)}
+             case 'all':
+                 self.herbs = {x: y for x, y in self.herbs.items() if (y.ingrs or y.formulas)}
+                 self.formulas = {x: y for x, y in self.formulas.items() if (y.ingrs or y.herbs)}
+                 self.ingrs = {x: y for x, y in self.ingrs.items() if (y.formulas or y.herbs)}
+             case _:
+                 raise ValueError(f'Unknown how parameter: {how}. Known parameters are "any" and "all"')
+
+     def select_formula_by_cpd(self, cids: List):
+         cids = set(x for x in cids if x in self.ingrs)
+         if not cids:
+             return
+         cpd_counts = {x: len(set(z.cid for z in y.ingrs) & cids) for x, y in self.formulas.items()}
+         n_max = max(cpd_counts.values())
+         if n_max == 0:
+             return n_max, []
+         selected = [x for x, y in cpd_counts.items() if y == n_max]
+         return n_max, selected
+
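select_formula_by_cpd scores every formula by how many of the query CIDs it contains and returns the best hit count with the tied formula names, or None when no query CID is in the database; a sketch with illustrative output:

n_hits, best_formulas = db.select_formula_by_cpd([969516, 445154, 5280343])
# e.g. n_hits == 2, best_formulas == ['SAN-HUANG-TANG~1', ...]  (values illustrative)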
+     def pick_formula_by_cpd(self, cids: List):
+         cids = [x for x in cids if x in self.ingrs]
+         if not cids:
+             return
+         raise NotImplementedError()
+
+     def select_formula_by_herb(self, herbs: List):
+         raise NotImplementedError()
+
+     def pick_formula_by_herb(self, herbs: List):
+         raise NotImplementedError()
+
+
+ def main(ab_initio=False,
+          p_BATMAN="./BATMAN/",
+          fname='BATMAN_DB.json'):
+     p_BATMAN = p_BATMAN.removesuffix("/") + "/"
+     # use this branch if you want to recreate the TCMDB database of Chinese medicine from the BATMAN files
+     if ab_initio:
+         db = TCMDB.make_new_db(p_BATMAN)
+         db.link_ingredients_n_formulas()
+         db.save_to_json(p_BATMAN + fname)
+         # db.save_to_json('../TCM screening/BATMAN_DB.json')
+     else:
+         db = TCMDB.read_from_json('../TCM screening/BATMAN_DB.json')
+         # db = TCMDB.read_from_json(p_BATMAN + fname)
+
+     cids = [969516,  # curcumin
+             445154,  # resveratrol
+             5280343,  # quercetin
+             6167,  # colchicine
+             5280443,  # apigenin
+             65064,  # EGCG
+             5757,  # estradiol
+             5994,  # progesterone
+             5280863,  # kaempferol
+             107985,  # triptolide
+             14985,  # alpha-tocopherol
+             1548943,  # capsaicin
+             64982,  # baicalin
+             6013,  # testosterone
+             ]
+
+     p3_formula = db.select_formula_by_cpd(cids)
+     # serialize the database here if you need to save it
+     ser_db = db.serialize()
+
+
+ if __name__ == '__main__':
+     main(ab_initio=True, p_BATMAN="./BATMAN/", fname='BATMAN_DB.json')