{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## ⚠️ For now, do not use this script as there is one minor problem! ⚠️\n",
"### Instead just run `create_meta_data_csv_md.py` once and copy & paste the entire content of `meta_data.md` between `### Catalogue` and `### Example`.\n",
"\n",
"This is a jupyter notebook with two cells/scripts. However, both have the same flaw: `\\n` new line handling is messy as the table does contain entries like `...| test | \\nArticle | test data | ...` that are parsed as line breaks destroying the markdown table. \n",
"\n",
"If you read this and have a quick solution, please share it! I'm currently working on other features and don't have time to investigate further."
]
},
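{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Demo: why literal `\\n` sequences break the table\n",
"\n",
"A minimal, self-contained sketch of the flaw described above, using made-up sample strings rather than the real `meta_data.md`: when `re.sub` gets a plain string as the replacement, it interprets backslash escapes such as `\\n`, so literal `\\n` sequences in the table turn into real line breaks. A callable replacement is inserted verbatim and sidesteps the escape processing."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import re\n",
"\n",
"# Made-up sample data: a table row containing a literal backslash-n,\n",
"# like the entries described in the warning above.\n",
"readme = \"intro\\n### Catalogue\\nstale\\n### Example\\noutro\"\n",
"metadata = r\"| test | \\nArticle | test data |\"\n",
"\n",
"pattern = r\"### Catalogue(.*?)### Example\"\n",
"\n",
"# String replacement: re.sub processes backslash escapes in the\n",
"# replacement, so the literal \\n becomes a real line break.\n",
"broken = re.sub(pattern, f\"### Catalogue\\n{metadata}\\n### Example\", readme, flags=re.DOTALL)\n",
"print(repr(broken))\n",
"\n",
"# Callable replacement: the returned string is inserted verbatim,\n",
"# so the literal \\n survives and the table row stays intact.\n",
"intact = re.sub(pattern, lambda _: f\"### Catalogue\\n{metadata}\\n### Example\", readme, flags=re.DOTALL)\n",
"print(repr(intact))\n"
]
},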
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 1) Read the meta_data file and update readme"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"import re\n",
"\n",
"def update_readme_with_metadata(readme_path='README.md', metadata_path='meta_data.md'):\n",
" # Step 1: Read the readme.md file\n",
" with open(readme_path, 'r') as file:\n",
" readme_content = file.read()\n",
" \n",
" # Step 2: Identify the section to replace\n",
" pattern = r\"### Catalogue(.*?)### Example\"\n",
" match = re.search(pattern, readme_content, re.DOTALL)\n",
" \n",
" if match:\n",
" # Step 3: Read the metadata.md file\n",
" with open(metadata_path, 'r') as file:\n",
" metadata_content = file.read()\n",
" \n",
" # Step 4: Replace the section\n",
" updated_content = re.sub(pattern, f\"### Catalogue\\n{metadata_content}\\n### Example\", readme_content, flags=re.DOTALL)\n",
" \n",
" # Step 5: Write the updated content back to readme.md\n",
" with open(readme_path, 'w') as file:\n",
" file.write(updated_content)\n",
" else:\n",
" print(\"The specified section was not found in the readme.md file.\")\n",
"\n",
"# Call the function\n",
"update_readme_with_metadata()\n"
]
},
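{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick, self-contained check of `update_readme_with_metadata` against temporary files, so the real `README.md` stays untouched. The file contents below are made-up sample data, and the function from the cell above must already be defined."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import tempfile\n",
"\n",
"# Throwaway directory with a fake README.md and meta_data.md.\n",
"tmp_dir = tempfile.mkdtemp()\n",
"readme_path = os.path.join(tmp_dir, 'README.md')\n",
"metadata_path = os.path.join(tmp_dir, 'meta_data.md')\n",
"\n",
"with open(readme_path, 'w', encoding='utf-8') as f:\n",
"    f.write(\"intro\\n### Catalogue\\nstale table\\n### Example\\noutro\\n\")\n",
"\n",
"with open(metadata_path, 'w', encoding='utf-8') as f:\n",
"    f.write(\"| filesize | filename |\\n| --- | --- |\\n| 1.2 | test.json.gz |\\n\")\n",
"\n",
"update_readme_with_metadata(readme_path, metadata_path)\n",
"\n",
"# The stale table between the two headings should now be replaced.\n",
"with open(readme_path, encoding='utf-8') as f:\n",
"    print(f.read())\n"
]
},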
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 2) Do everything in one go: parse all json.gz files, create the meta_data file and update readme. Note that this cell might not run in Jupyter due to multiprocessing issues with ipykernel. "
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import json\n",
"import gzip\n",
"import csv\n",
"from multiprocessing import Pool, cpu_count\n",
"import time\n",
"\n",
"def process_json_file(file_path):\n",
" with gzip.open(file_path, 'rt', encoding='utf-8') as gz_file:\n",
" data = json.load(gz_file)\n",
" return data.get('meta', {})\n",
"\n",
"def get_file_size_mb(file_path):\n",
" return round(os.path.getsize(file_path) / (1024 * 1024), 2)\n",
"\n",
"def write_to_csv_and_md(output_csv, output_md, headers, data):\n",
" with open(output_csv, 'w', newline='', encoding='utf-8') as csv_file:\n",
" writer = csv.DictWriter(csv_file, fieldnames=headers)\n",
" writer.writeheader()\n",
" writer.writerows(data)\n",
"\n",
" with open(output_md, 'w', encoding='utf-8') as md_file:\n",
" md_file.write(\"| \" + \" | \".join(headers) + \" |\\n\")\n",
" md_file.write(\"|\" + \"|\".join([\" --- \" for _ in headers]) + \"|\\n\")\n",
"\n",
" for row in data:\n",
" md_file.write(\"| \" + \" | \".join([str(row[header]) for header in headers]) + \" |\\n\")\n",
"\n",
"def process_file(file_name, input_directory, base_url):\n",
" file_path = os.path.join(input_directory, file_name)\n",
" meta_data = process_json_file(file_path)\n",
" file_size_mb = get_file_size_mb(file_path)\n",
"\n",
" row_data = {\n",
" \"filesize\": file_size_mb,\n",
" \"filename\": file_name,\n",
" \"URL\": f\"{base_url}{file_name.replace('.json.gz', '')}\",\n",
" **meta_data\n",
" }\n",
"\n",
" return row_data\n",
"\n",
"def replace_section_in_readme(readme_path, meta_data_path):\n",
" # Read README.md content\n",
" with open(readme_path, 'r', encoding='utf-8') as readme_file:\n",
" readme_content = readme_file.read()\n",
" \n",
" # Read meta_data.md content\n",
" with open(meta_data_path, 'r', encoding='utf-8') as meta_data_file:\n",
" meta_data_content = meta_data_file.read()\n",
" \n",
" # Pattern to identify the section to replace\n",
" pattern = r\"### Catalogue(.*?)### Example\"\n",
" # Use re.sub to replace the section with meta_data_content\n",
" updated_readme_content = re.sub(pattern, f\"### Catalogue{meta_data_content}### Example\", readme_content, flags=re.DOTALL)\n",
" \n",
" # Write the updated content back to README.md\n",
" with open(readme_path, 'w', encoding='utf-8') as readme_file:\n",
" readme_file.write(updated_readme_content)\n",
"\n",
"def main(input_directory, output_csv, output_md, base_url=\"https://do-me.github.io/SemanticFinder/?hf=\"):\n",
" headers = [\n",
" \"filesize\", \"textTitle\", \"textAuthor\", \"textYear\", \"textLanguage\", \"URL\",\n",
" \"modelName\", \"quantized\", \"splitParam\", \"splitType\", \"characters\", \"chunks\",\n",
" \"wordsToAvoidAll\", \"wordsToCheckAll\", \"wordsToAvoidAny\", \"wordsToCheckAny\",\n",
" \"exportDecimals\", \"lines\", \"textNotes\", \"textSourceURL\", \"filename\"\n",
" ]\n",
"\n",
" all_data = []\n",
" \n",
" start_time = time.time()\n",
"\n",
" file_list = [file_name for file_name in os.listdir(input_directory) if file_name.endswith('.json.gz')]\n",
"\n",
" with Pool(cpu_count()) as pool:\n",
" all_data = pool.starmap(process_file, [(file_name, input_directory, base_url) for file_name in file_list])\n",
"\n",
" write_to_csv_and_md(output_csv, output_md, headers, all_data)\n",
" \n",
" replace_section_in_readme(os.path.join(input_directory, 'README.md'), os.path.join(input_directory, output_md))\n",
"\n",
" end_time = time.time()\n",
" processing_time = end_time - start_time\n",
" print(f\"Processing time: {round(processing_time, 2)} seconds\")\n",
"\n",
"if __name__ == \"__main__\":\n",
" input_directory = \".\"\n",
" output_csv = \"meta_data.csv\"\n",
" output_md = \"meta_data.md\"\n",
"\n",
" main(input_directory, output_csv, output_md)\n"
]
},
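{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sequential fallback for the multiprocessing caveat above: under ipykernel, `multiprocessing.Pool` workers can fail to pickle functions that are defined only inside the notebook. This sketch assumes `process_file`, `write_to_csv_and_md` and `replace_section_in_readme` from the cell above are already defined and simply loops over the files in a single process; slower, but it runs in any kernel."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import time\n",
"\n",
"def main_sequential(input_directory, output_csv, output_md,\n",
"                    base_url=\"https://do-me.github.io/SemanticFinder/?hf=\"):\n",
"    headers = [\n",
"        \"filesize\", \"textTitle\", \"textAuthor\", \"textYear\", \"textLanguage\", \"URL\",\n",
"        \"modelName\", \"quantized\", \"splitParam\", \"splitType\", \"characters\", \"chunks\",\n",
"        \"wordsToAvoidAll\", \"wordsToCheckAll\", \"wordsToAvoidAny\", \"wordsToCheckAny\",\n",
"        \"exportDecimals\", \"lines\", \"textNotes\", \"textSourceURL\", \"filename\"\n",
"    ]\n",
"\n",
"    start_time = time.time()\n",
"\n",
"    file_list = [f for f in os.listdir(input_directory) if f.endswith('.json.gz')]\n",
"\n",
"    # One file at a time: no worker processes, so nothing needs pickling.\n",
"    all_data = [process_file(f, input_directory, base_url) for f in file_list]\n",
"\n",
"    write_to_csv_and_md(output_csv, output_md, headers, all_data)\n",
"    replace_section_in_readme(os.path.join(input_directory, 'README.md'),\n",
"                              os.path.join(input_directory, output_md))\n",
"\n",
"    print(f\"Processing time: {round(time.time() - start_time, 2)} seconds\")\n",
"\n",
"main_sequential(\".\", \"meta_data.csv\", \"meta_data.md\")\n"
]
}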
],
"metadata": {
"kernelspec": {
"display_name": "py3.11",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}