Pringled committed on
Commit 2a0be82
1 Parent(s): f39d105
Files changed (1)
  app.py +4 -373
app.py CHANGED
@@ -14,20 +14,6 @@ default_dataset_split = "train"
 default_text_column = "sentence"
 default_threshold = 0.9
 
-# def batch_iterable(iterable, batch_size):
-#     """Yield successive batches from an iterable."""
-#     for i in range(0, len(iterable), batch_size):
-#         yield iterable[i:i + batch_size]
-
-# def compute_embeddings(texts, batch_size, progress, desc):
-#     """Compute embeddings for a list of texts with progress tracking."""
-#     embeddings = []
-#     total_batches = (len(texts) + batch_size - 1) // batch_size
-#     for i, batch_texts in enumerate(batch_iterable(texts, batch_size)):
-#         embeddings.append(model.encode(batch_texts, show_progressbar=False))
-#         progress((i + 1) / total_batches, desc=desc)
-#     return np.concatenate(embeddings, axis=0)
-
 def deduplicate_embeddings(
     embeddings_a: np.ndarray,
     embeddings_b: np.ndarray = None,
@@ -101,8 +87,8 @@ def perform_deduplication(
 
         num_duplicates = len(duplicate_mapping)
         result_text = (
-            f"**Total documents:** {len(texts1)}\n"
-            f"**Duplicates found:** {num_duplicates}\n"
+            f"**Total documents:** {len(texts1)}\n\n"
+            f"**Duplicates found:** {num_duplicates}\n\n"
             f"**Unique documents after deduplication:** {len(deduplicated_indices)}\n\n"
         )
 
@@ -138,8 +124,8 @@ def perform_deduplication(
 
         num_duplicates = len(duplicate_indices)
        result_text = (
-            f"**Total documents in {dataset2_name}/{dataset2_split}:** {len(texts2)}\n"
-            f"**Duplicates found in Dataset 2:** {num_duplicates}\n"
+            f"**Total documents in {dataset2_name}/{dataset2_split}:** {len(texts2)}\n\n"
+            f"**Duplicates found in Dataset 2:** {num_duplicates}\n\n"
             f"**Unique documents after deduplication:** {len(texts2) - num_duplicates}\n\n"
         )
 
@@ -212,358 +198,3 @@ with gr.Blocks(css="#status_output { height: 150px; overflow: auto; }") as demo:
     )
 
 demo.launch()
-
-# import gradio as gr
-# from datasets import load_dataset
-# import numpy as np
-# from model2vec import StaticModel
-# from reach import Reach
-# from difflib import ndiff
-
-# # Load the model at startup
-# model = StaticModel.from_pretrained("minishlab/M2V_base_output")
-
-# # Default dataset parameters
-# default_dataset1_name = "sst2"
-# default_dataset1_split = "train"
-# default_dataset2_name = "sst2"
-# default_dataset2_split = "validation"
-# default_text_column = "sentence"
-# default_threshold = 0.9
-
-# # Load the default datasets at startup
-# ds_default1 = load_dataset(default_dataset1_name, split=default_dataset1_split)
-# ds_default2 = load_dataset(default_dataset2_name, split=default_dataset2_split)
-
-# def batch_iterable(iterable, batch_size):
-#     """Helper function to create batches from an iterable."""
-#     for i in range(0, len(iterable), batch_size):
-#         yield iterable[i:i + batch_size]
-
-# def compute_embeddings(texts, batch_size, progress, desc="Computing embeddings"):
-#     embeddings = []
-#     total_batches = (len(texts) + batch_size - 1) // batch_size
-#     for i, batch_texts in enumerate(batch_iterable(texts, batch_size)):
-#         batch_embeddings = model.encode(batch_texts, show_progressbar=False)
-#         embeddings.append(batch_embeddings)
-#         progress((i + 1) / total_batches, desc=desc)
-#     return np.concatenate(embeddings, axis=0)
-
-# def deduplicate(
-#     embedding_matrix: np.ndarray,
-#     threshold: float,
-#     batch_size: int = 1024,
-#     progress=None
-# ) -> tuple[np.ndarray, dict[int, int]]:
-#     # Building the index
-#     progress(0, desc="Building search index...")
-#     reach = Reach(
-#         vectors=embedding_matrix, items=[str(i) for i in range(len(embedding_matrix))]
-#     )
-
-#     deduplicated_indices = set(range(len(embedding_matrix)))
-#     duplicate_to_original_mapping = {}
-
-#     # Finding nearest neighbors
-#     progress(0, desc="Finding nearest neighbors...")
-#     results = reach.nearest_neighbor_threshold(
-#         embedding_matrix,
-#         threshold=threshold,
-#         batch_size=batch_size,
-#         show_progressbar=False,  # Disable internal progress bar
-#     )
-
-#     # Processing duplicates with a progress bar
-#     total_items = len(embedding_matrix)
-#     for i, similar_items in enumerate(
-#         progress.tqdm(results, desc="Processing duplicates", total=total_items)
-#     ):
-#         if i not in deduplicated_indices:
-#             continue
-
-#         similar_indices = [int(item[0]) for item in similar_items if int(item[0]) != i]
-
-#         for sim_idx in similar_indices:
-#             if sim_idx in deduplicated_indices:
-#                 deduplicated_indices.remove(sim_idx)
-#                 duplicate_to_original_mapping[sim_idx] = i
-
-#     return np.array(list(deduplicated_indices)), duplicate_to_original_mapping
-
-# def display_word_differences(x: str, y: str) -> str:
-#     diff = ndiff(x.split(), y.split())
-#     return " ".join([word for word in diff if word.startswith(("+", "-"))])
-
-# def perform_deduplication(
-#     deduplication_type,
-#     dataset1_name,
-#     dataset1_split,
-#     dataset1_text_column,
-#     dataset2_name="",
-#     dataset2_split="",
-#     dataset2_text_column="",
-#     threshold=default_threshold,
-#     progress=gr.Progress(track_tqdm=True),
-# ):
-#     try:
-#         # Convert threshold to float
-#         threshold = float(threshold)
-
-#         # Initialize status message
-#         status = ""
-
-#         if deduplication_type == "Single dataset":
-#             # Load Dataset 1
-#             status = "Loading Dataset 1..."
-#             yield status, ""
-#             if (
-#                 dataset1_name == default_dataset1_name
-#                 and dataset1_split == default_dataset1_split
-#             ):
-#                 ds = ds_default1
-#             else:
-#                 ds = load_dataset(dataset1_name, split=dataset1_split)
-
-#             # Extract texts
-#             status = "Extracting texts from Dataset 1..."
-#             yield status, ""
-#             texts = [example[dataset1_text_column] for example in ds]
-
-#             # Compute embeddings
-#             status = "Computing embeddings for Dataset 1..."
-#             yield status, ""
-#             embedding_matrix = compute_embeddings(
-#                 texts,
-#                 batch_size=64,
-#                 progress=progress,
-#                 desc="Computing embeddings for Dataset 1",
-#             )
-
-#             # Deduplicate
-#             status = "Deduplicating embeddings..."
-#             yield status, ""
-#             deduplicated_indices, duplicate_to_original_mapping = deduplicate(
-#                 embedding_matrix, threshold, progress=progress
-#             )
-
-#             # Prepare the results
-#             num_duplicates = len(duplicate_to_original_mapping)
-#             num_total = len(texts)
-#             num_deduplicated = len(deduplicated_indices)
-
-#             result_text = f"**Total documents:** {num_total}\n"
-#             result_text += f"**Number of duplicates found:** {num_duplicates}\n"
-#             result_text += (
-#                 f"**Number of unique documents after deduplication:** {num_deduplicated}\n\n"
-#             )
-
-#             # Show deduplicated examples
-#             if num_duplicates > 0:
-#                 result_text += "**Examples of duplicates found:**\n\n"
-#                 num_examples = min(5, num_duplicates)
-#                 for duplicate_idx, original_idx in list(duplicate_to_original_mapping.items())[:num_examples]:
-#                     original_text = texts[original_idx]
-#                     duplicate_text = texts[duplicate_idx]
-#                     differences = display_word_differences(original_text, duplicate_text)
-#                     result_text += f"**Original text:**\n{original_text}\n\n"
-#                     result_text += f"**Duplicate text:**\n{duplicate_text}\n\n"
-#                     result_text += f"**Differences:**\n{differences}\n"
-#                     result_text += "-" * 50 + "\n\n"
-#             else:
-#                 result_text += "No duplicates found."
-
-#             # Final status
-#             status = "Deduplication completed."
-#             yield status, result_text
-
-#         elif deduplication_type == "Cross-dataset":
-#             # Similar code for cross-dataset deduplication
-#             # Load Dataset 1
-#             status = "Loading Dataset 1..."
-#             yield status, ""
-#             if (
-#                 dataset1_name == default_dataset1_name
-#                 and dataset1_split == default_dataset1_split
-#             ):
-#                 ds1 = ds_default1
-#             else:
-#                 ds1 = load_dataset(dataset1_name, split=dataset1_split)
-
-#             # Load Dataset 2
-#             status = "Loading Dataset 2..."
-#             yield status, ""
-#             if (
-#                 dataset2_name == default_dataset2_name
-#                 and dataset2_split == default_dataset2_split
-#             ):
-#                 ds2 = ds_default2
-#             else:
-#                 ds2 = load_dataset(dataset2_name, split=dataset2_split)
-
-#             # Extract texts from Dataset 1
-#             status = "Extracting texts from Dataset 1..."
-#             yield status, ""
-#             texts1 = [example[dataset1_text_column] for example in ds1]
-
-#             # Extract texts from Dataset 2
-#             status = "Extracting texts from Dataset 2..."
-#             yield status, ""
-#             texts2 = [example[dataset2_text_column] for example in ds2]
-
-#             # Compute embeddings for Dataset 1
-#             status = "Computing embeddings for Dataset 1..."
-#             yield status, ""
-#             embedding_matrix1 = compute_embeddings(
-#                 texts1,
-#                 batch_size=64,
-#                 progress=progress,
-#                 desc="Computing embeddings for Dataset 1",
-#             )
-
-#             # Compute embeddings for Dataset 2
-#             status = "Computing embeddings for Dataset 2..."
-#             yield status, ""
-#             embedding_matrix2 = compute_embeddings(
-#                 texts2,
-#                 batch_size=64,
-#                 progress=progress,
-#                 desc="Computing embeddings for Dataset 2",
-#             )
-
-#             # Deduplicate across datasets
-#             status = "Deduplicating embeddings across datasets..."
-#             yield status, ""
-#             duplicate_indices_in_ds2, duplicate_to_original_mapping = deduplicate_across_datasets(
-#                 embedding_matrix1, embedding_matrix2, threshold, progress=progress
-#             )
-
-#             num_duplicates = len(duplicate_indices_in_ds2)
-#             num_total_ds2 = len(texts2)
-#             num_unique_ds2 = num_total_ds2 - num_duplicates
-
-#             result_text = f"**Total documents in {dataset2_name}/{dataset2_split}:** {num_total_ds2}\n"
-#             result_text += f"**Number of duplicates found in {dataset2_name}/{dataset2_split}:** {num_duplicates}\n"
-#             result_text += f"**Number of unique documents in {dataset2_name}/{dataset2_split} after deduplication:** {num_unique_ds2}\n\n"
-
-#             # Show deduplicated examples
-#             if num_duplicates > 0:
-#                 result_text += "**Examples of duplicates found in Dataset 2:**\n\n"
-#                 num_examples = min(5, num_duplicates)
-#                 for duplicate_idx in duplicate_indices_in_ds2[:num_examples]:
-#                     original_idx = duplicate_to_original_mapping[duplicate_idx]
-#                     original_text = texts1[original_idx]
-#                     duplicate_text = texts2[duplicate_idx]
-#                     differences = display_word_differences(original_text, duplicate_text)
-#                     result_text += f"**Original text (Dataset 1):**\n{original_text}\n\n"
-#                     result_text += f"**Duplicate text (Dataset 2):**\n{duplicate_text}\n\n"
-#                     result_text += f"**Differences:**\n{differences}\n"
-#                     result_text += "-" * 50 + "\n\n"
-#             else:
-#                 result_text += "No duplicates found."
-
-#             # Final status
-#             status = "Deduplication completed."
-#             yield status, result_text
-
-#     except Exception as e:
-#         yield f"An error occurred: {e}", ""
-#         raise e
-
-# def deduplicate_across_datasets(
-#     embedding_matrix_1: np.ndarray,
-#     embedding_matrix_2: np.ndarray,
-#     threshold: float,
-#     batch_size: int = 1024,
-#     progress=None
-# ) -> tuple[list[int], dict[int, int]]:
-#     # Building the index from Dataset 1
-#     progress(0, desc="Building search index from Dataset 1...")
-#     reach = Reach(
-#         vectors=embedding_matrix_1, items=[str(i) for i in range(len(embedding_matrix_1))]
-#     )
-
-#     duplicate_indices_in_test = []
-#     duplicate_to_original_mapping = {}
-
-#     # Finding nearest neighbors between datasets
-#     progress(0, desc="Finding nearest neighbors between datasets...")
-#     results = reach.nearest_neighbor_threshold(
-#         embedding_matrix_2,
-#         threshold=threshold,
-#         batch_size=batch_size,
-#         show_progressbar=False,  # Disable internal progress bar
-#     )
-
-#     total_items = len(embedding_matrix_2)
-#     # Processing duplicates with a progress bar
-#     for i, similar_items in enumerate(
-#         progress.tqdm(results, desc="Processing duplicates across datasets", total=total_items)
-#     ):
-#         similar_indices = [int(item[0]) for item in similar_items if item[1] >= threshold]
-
-#         if similar_indices:
-#             duplicate_indices_in_test.append(i)
-#             duplicate_to_original_mapping[i] = similar_indices[0]
-
-#     return duplicate_indices_in_test, duplicate_to_original_mapping
-
-# # Adjust the height of the status_output component using custom CSS
-# with gr.Blocks(css="#status_output { height: 150px; overflow: auto; }") as demo:
-#     gr.Markdown("# Semantic Deduplication")
-
-#     deduplication_type = gr.Radio(
-#         choices=["Single dataset", "Cross-dataset"],
-#         label="Deduplication Type",
-#         value="Single dataset",
-#     )
-
-#     with gr.Row():
-#         dataset1_name = gr.Textbox(value=default_dataset1_name, label="Dataset 1 Name")
-#         dataset1_split = gr.Textbox(value=default_dataset1_split, label="Dataset 1 Split")
-#         dataset1_text_column = gr.Textbox(value=default_text_column, label="Text Column Name")
-
-#     dataset2_inputs = gr.Column(visible=False)
-#     with dataset2_inputs:
-#         gr.Markdown("### Dataset 2")
-#         with gr.Row():
-#             dataset2_name = gr.Textbox(value=default_dataset2_name, label="Dataset 2 Name")
-#             dataset2_split = gr.Textbox(value=default_dataset2_split, label="Dataset 2 Split")
-#             dataset2_text_column = gr.Textbox(value=default_text_column, label="Text Column Name")
-
-#     threshold = gr.Slider(
-#         minimum=0.0, maximum=1.0, value=default_threshold, label="Similarity Threshold"
-#     )
-
-#     compute_button = gr.Button("Compute")
-
-#     # Use 'gr.Markdown' with 'elem_id' and custom CSS to adjust height
-#     status_output = gr.Markdown(elem_id="status_output")
-#     result_output = gr.Markdown()
-
-#     # Function to update the visibility of dataset2_inputs
-#     def update_visibility(deduplication_type_value):
-#         if deduplication_type_value == "Cross-dataset":
-#             return gr.update(visible=True)
-#         else:
-#             return gr.update(visible=False)
-
-#     deduplication_type.change(
-#         update_visibility, inputs=deduplication_type, outputs=dataset2_inputs
-#     )
-
-#     compute_button.click(
-#         fn=perform_deduplication,
-#         inputs=[
-#             deduplication_type,
-#             dataset1_name,
-#             dataset1_split,
-#             dataset1_text_column,
-#             dataset2_name,
-#             dataset2_split,
-#             dataset2_text_column,
-#             threshold,
-#         ],
-#         outputs=[status_output, result_output],
-#     )
-
-# demo.launch()
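The only functional edit in this commit is the extra newline appended to each summary line in result_text; everything else is deletion of the commented-out earlier draft. A minimal sketch of why the newline matters (illustrative only, not part of the commit; sample values are hypothetical, and the third-party python-markdown package stands in for the paragraph rules that gr.Markdown applies):

import markdown

single = "**Total documents:** 100\n**Duplicates found:** 7"
double = "**Total documents:** 100\n\n**Duplicates found:** 7"

# A single "\n" is a soft break: Markdown keeps both labels in one <p>,
# so they render run together on one line.
print(markdown.markdown(single))

# A blank line ("\n\n") starts a new paragraph: each label gets its own <p>.
print(markdown.markdown(double))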
 
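For context, both the deleted draft and the surviving deduplicate_embeddings take the same approach: embed every document, then treat any pair whose cosine similarity exceeds the threshold as duplicates, keeping the first occurrence. A minimal NumPy sketch of that idea (all names hypothetical; the app itself delegates the neighbor search to the reach library and processes embeddings in batches):

import numpy as np

def dedup_sketch(embeddings: np.ndarray, threshold: float = 0.9):
    """Greedily keep the first occurrence of each near-duplicate group."""
    # Normalize rows so a plain dot product equals cosine similarity.
    normed = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
    keep, duplicate_of = [], {}
    for i, vec in enumerate(normed):
        if keep:
            sims = normed[keep] @ vec      # similarity to every kept item
            j = int(np.argmax(sims))
            if sims[j] >= threshold:
                duplicate_of[i] = keep[j]  # i duplicates an earlier kept item
                continue
        keep.append(i)
    return np.array(keep), duplicate_of

This quadratic loop only shows the logic; Reach.nearest_neighbor_threshold achieves the same effect with a batched, vectorized search.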