Datasets:

Modalities:
Tabular
Text
Formats:
parquet
Languages:
English
ArXiv:
DOI:
Libraries:
Datasets
Dask
License:
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -446,7 +446,7 @@ from datatrove.pipeline.readers import ParquetReader
446
 
447
  # limit determines how many documents will be streamed (remove for all)
448
  data_reader = ParquetReader("hf://datasets/HuggingFaceFW/fineweb-edu", glob_pattern="data/*/*.parquet", limit=1000)
449
- # or to fetch a specific dump CC-MAIN-2024-10, eplace "CC-MAIN-2024-10" with "sample/100BT" to use the 100BT sample
450
  data_reader = ParquetReader("hf://datasets/HuggingFaceFW/fineweb-edu/CC-MAIN-2024-10", limit=1000)
451
  for document in data_reader():
452
  # do something with document
@@ -463,7 +463,7 @@ from datatrove.pipeline.writers import JsonlWriter
463
 
464
  pipeline_exec = LocalPipelineExecutor(
465
  pipeline=[
466
- # replace "CC-MAIN-2024-10" with "sample/100BT" to use the 100BT sample
467
  ParquetReader("hf://datasets/HuggingFaceFW/fineweb-edu/CC-MAIN-2024-10", limit=1000),
468
  LambdaFilter(lambda doc: "hugging" in doc.text),
469
  JsonlWriter("some-output-path")
 
446
 
447
  # limit determines how many documents will be streamed (remove for all)
448
  data_reader = ParquetReader("hf://datasets/HuggingFaceFW/fineweb-edu", glob_pattern="data/*/*.parquet", limit=1000)
449
+ # or to fetch a specific dump such as CC-MAIN-2024-10; replace "CC-MAIN-2024-10" with "sample-100BT" to use the 100BT sample
450
  data_reader = ParquetReader("hf://datasets/HuggingFaceFW/fineweb-edu/CC-MAIN-2024-10", limit=1000)
451
  for document in data_reader():
452
  # do something with document
 
463
 
464
  pipeline_exec = LocalPipelineExecutor(
465
  pipeline=[
466
+ # replace "CC-MAIN-2024-10" with "sample-100BT" to use the 100BT sample
467
  ParquetReader("hf://datasets/HuggingFaceFW/fineweb-edu/CC-MAIN-2024-10", limit=1000),
468
  LambdaFilter(lambda doc: "hugging" in doc.text),
469
  JsonlWriter("some-output-path")