{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "KQn5He3e0yfq", "outputId": "c9f062c5-9f90-4e2a-a9f2-f27cfd35f9fa" }, "outputs": [], "source": [ "# Install Python Packages as needed\n", "\n", "# !pip install xmltodict\n", "# !pip install lxml\n", "# !pip install fuzzywuzzy" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "id": "I8lo-m59cG1y" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "c:\\Users\\wipar\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\fuzzywuzzy\\fuzz.py:11: UserWarning: Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning\n", " warnings.warn('Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning')\n" ] } ], "source": [ "import os\n", "import zipfile\n", "import shutil\n", "import lxml.etree as ET\n", "from lxml import etree\n", "import yaml\n", "import xmltodict\n", "from fuzzywuzzy import fuzz\n", "from fuzzywuzzy import process\n", "from xml.etree.ElementTree import Element, ElementTree\n", "from typing import List" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Download the Wikifonia and MScoreLib data bases and place them at the same level as this notebook. Rename them to `wikifonia` and `mscorelib` if needed. Firstly, we combine the two folders into a single folder to work with. " ] }, { "cell_type": "code", "execution_count": 36, "metadata": {}, "outputs": [], "source": [ "# Create a new folder to store all the compressed files\n", "if not os.path.exists('compressed_files'):\n", " os.makedirs('compressed_files')\n", "\n", "# copy all of the files in the Wikifonia folder, appending their names with the current path to the new folder\n", "for file in os.listdir('wikifonia'):\n", " # Check if the file is a .mxl file. If the file extension is not .mxl, skip the file\n", " # There are duplicates with the same name but a numarical extension (e.g. 'song.mxl' and 'song.mxl.1')\n", " if file.endswith('.mxl'):\n", " file_path = os.path.join(\"wikifonia\", file)\n", " new_name = file_path.replace('\\\\', '_')\n", " shutil.copy('wikifonia/' + file, 'compressed_files/' + new_name)\n", "\n", "# copy all of the mscorelib mxl files. The folder is recursively searched for all .mxl files\n", "for root, dirs, files in os.walk('mscorelib'):\n", " for file in files:\n", " if file.endswith('.mxl'):\n", " # Copy the file to the new folder, appending the name with the current path\n", " file_path = os.path.join(root, file)\n", " new_name = file_path.replace('\\\\', '_')\n", " shutil.copy(root + '/' + file, 'compressed_files/' + new_name)" ] }, { "cell_type": "markdown", "metadata": { "id": "NqTgzcOkCxjQ" }, "source": [ "# Convert from `.mxl` to `.MusicXML`" ] }, { "cell_type": "markdown", "metadata": { "id": "jXoff-z9m8nI" }, "source": [ "Load the MusicXML file into the script as a `.mxl`." ] }, { "cell_type": "code", "execution_count": 46, "metadata": {}, "outputs": [], "source": [ "def unzip_file(mxl_file):\n", " # Unzip the file\n", " with zipfile.ZipFile(os.path.join(\"compressed_files\", mxl_file), \"r\") as zip_ref:\n", " zip_ref.extractall(\"temp\")\n", " # Get the file with the extension .xml\n", " xml_file = [file for file in os.listdir(\"temp\") if file.endswith('.xml')][0]\n", " generated_file_location = os.path.join(\"temp\", xml_file)\n", " # get the name of the file without the extension\n", " new_name = os.path.basename(mxl_file.replace('.mxl', '.musicxml'))\n", " # join the new name with the current path\n", " new_name = os.path.join('uncompressed_original_files', new_name)\n", " \n", " if not os.path.exists(new_name):\n", " os.rename(generated_file_location, new_name)\n", " else:\n", " # print what file is being skipped\n", " print(\"File already exists: \", new_name)\n", "\n", " # remove the temp folder\n", " shutil.rmtree(\"temp\")" ] }, { "cell_type": "markdown", "metadata": { "id": "dKSqmu8_nHx1" }, "source": [ "Decompres it into a `.MusicXML` file. (https://www.reddit.com/r/learnprogramming/comments/bx2jmg/decompressing_a_compressed_xml_file_mxl/)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# for every file in the compressed_files folder, remove single quotes from the file name\n", "for file in os.listdir('compressed_files'):\n", " if \"'\" in file:\n", " new_name = file.replace(\"'\", \"\")\n", " \n", " if not os.path.exists('compressed_files/' + new_name):\n", " os.rename('compressed_files/' + file, 'compressed_files/' + new_name)\n", " else:\n", " # print what file is being skipped\n", " print(\"File already exists: \", new_name)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Create a new folder to store all the compressed files\n", "if not os.path.exists('uncompressed_original_files'):\n", " os.makedirs('uncompressed_original_files')\n", "\n", "# copy all of the files from the compressed_files folder to the uncompressed_original_files folder, using the unzip_file function\n", "for file in os.listdir('compressed_files'):\n", " # Convert the file name to a string\n", " file = str(file)\n", " # unzip the file\n", " unzip_file(file)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "NOTE: All files from these sources are in `score-partwise` format already. If your files are not, a conversion from `score-timewise` is needed. Each part is one or more entries, and Chord Llama does not understand multi-part files. Please see W3's MusicXML page on file stucture for more information: https://www.w3.org/2021/06/musicxml40/tutorial/structure-of-musicxml-files/ " ] }, { "cell_type": "markdown", "metadata": { "id": "DNjh_zkhDfqg" }, "source": [ "# Clean the file\n", "\n", "We need to remove anything unnessary for the tokenizer, anything that does not help with the core idea of music generation, or is too rare to probably be taught to the model." ] }, { "cell_type": "markdown", "metadata": { "id": "xf6TXCrfn1LC" }, "source": [ "Remove unnecessary components like `` and attributes like `default-y`.\n", "\n", "To optimize the tokenizer and training, we need to remove as much as possible without significantly editing the meaning of the notes and removing complexity." ] }, { "cell_type": "code", "execution_count": 104, "metadata": { "id": "2imGMgYw-e5S" }, "outputs": [], "source": [ "def element_deleter(root: Element, element_name: str) -> None:\n", " element_to_delete = root.find(element_name)\n", "\n", " while element_to_delete is not None:\n", " root.remove(element_to_delete)\n", " element_to_delete = root.find(element_name)" ] }, { "cell_type": "code", "execution_count": 30, "metadata": { "id": "R3rIA_i1CO1o" }, "outputs": [], "source": [ "def attribute_deleter(element: Element, attribute_name: str) -> None:\n", " if attribute_name in element.attrib:\n", " del element.attrib[attribute_name]" ] }, { "cell_type": "code", "execution_count": 31, "metadata": {}, "outputs": [], "source": [ "font_array = [\n", " \"font-family\",\n", " \"font-size\",\n", " \"font-style\",\n", " \"font-weight\",\n", "]\n", "\n", "core_attributes = [\n", " \"default-y\",\n", " \"default-x\",\n", " \"color\",\n", " \"id\",\n", "]\n", "\n", "relative_print = [\n", " \"print-object\",\n", " \"relative-x\",\n", " \"relative-y\",\n", "]" ] }, { "cell_type": "code", "execution_count": 32, "metadata": { "id": "z2mkAQLxguWR" }, "outputs": [], "source": [ "score_partwise_removed_elements = [\n", " \"work\",\n", " \"movement-number\",\n", " \"movement-title\",\n", " \"identification\",\n", " \"defaults\",\n", " \"credit\",\n", " \"part-list\"\n", "]\n", "\n", "score_partwise_kept_elements = [\n", " \"part\"\n", "]\n", "\n", "score_partwise_removed_attributes = [\n", " \"version\",\n", "]\n", "\n", "score_partwise_dict = {\n", " 'removed_elements': score_partwise_removed_elements,\n", " 'kept_elements': score_partwise_kept_elements,\n", " 'removed_attributes': score_partwise_removed_attributes,\n", " 'kept_attributes': [],\n", "}" ] }, { "cell_type": "code", "execution_count": 33, "metadata": {}, "outputs": [], "source": [ "part_kept_elements = [\n", " \"measure\",\n", "]\n", "\n", "part_removed_attributes = [\n", " \"id\",\n", "]\n", "\n", "part_dict = {\n", " 'removed_elements': [],\n", " 'kept_elements': part_kept_elements,\n", " 'removed_attributes': part_removed_attributes,\n", " 'kept_attributes': [],\n", "}" ] }, { "cell_type": "code", "execution_count": 155, "metadata": {}, "outputs": [], "source": [ "measure_removed_elements = [\n", " \"print\",\n", " \"direction\",\n", " \"barline\",\n", "]\n", "\n", "measure_kept_elements = [\n", " \"attributes\",\n", " \"note\",\n", " \"backup\",\n", " \"forward\",\n", " \"harmony\",\n", " \"sound\",\n", "]\n", "\n", "measure_removed_attributes = [\n", " \"width\",\n", " \"implicit\",\n", " \"number\",\n", " \"id\",\n", " \"implicit\",\n", " \"text\",\n", " \"non-controlling\"\n", "]\n", "\n", "measure_dict = {\n", " 'removed_elements': measure_removed_elements,\n", " 'kept_elements': measure_kept_elements,\n", " 'removed_attributes': measure_removed_attributes,\n", " 'kept_attributes': [],\n", "}" ] }, { "cell_type": "code", "execution_count": 153, "metadata": {}, "outputs": [], "source": [ "attributes_removed_elements = [\n", " \"staff-details\",\n", "]\n", "\n", "attributes_kept_elements = [\n", " \"divisions\", # No elements or attributes inside \n", " \"key\",\n", " \"time\",\n", " \"clef\",\n", " \"transpose\",\n", " \"staves\", # Used to determine if the part should be discarded later\n", " \"measure-style\",\n", "]\n", "\n", "attributes_dict = {\n", " 'removed_elements': attributes_removed_elements,\n", " 'kept_elements': attributes_kept_elements,\n", " 'removed_attributes': [],\n", " 'kept_attributes': [],\n", "}" ] }, { "cell_type": "code", "execution_count": 36, "metadata": {}, "outputs": [], "source": [ "key_kept_elements = [\n", " \"fifths\",\n", "]\n", "\n", "key_removed_attributes = [\n", " \"number\",\n", "] + font_array + core_attributes + relative_print\n", "\n", "key_dict = {\n", " 'removed_elements': [],\n", " 'kept_elements': key_kept_elements,\n", " 'removed_attributes': key_removed_attributes,\n", " 'kept_attributes': [],\n", "}" ] }, { "cell_type": "code", "execution_count": 37, "metadata": {}, "outputs": [], "source": [ "time_kept_elements = [\n", " \"beats\",\n", " \"beat-type\",\n", "]\n", "\n", "time_removed_attributes = [\n", " \"halign\",\n", " \"separator\",\n", " \"symbol\",\n", " \"valign\",\n", "] + font_array + core_attributes + relative_print\n", "\n", "time_dict = {\n", " 'removed_elements': [],\n", " 'kept_elements': time_kept_elements,\n", " 'removed_attributes': time_removed_attributes,\n", " 'kept_attributes': [],\n", "}" ] }, { "cell_type": "code", "execution_count": 38, "metadata": {}, "outputs": [], "source": [ "clef_kept_elements = [\n", " \"sign\",\n", " \"line\",\n", "]\n", "\n", "clef_removed_attributes = [\n", " \"additional\",\n", " \"after-barline\",\n", " \"number\",\n", " \"size\"\n", "] + font_array + core_attributes + relative_print\n", "\n", "clef_dict = {\n", " 'removed_elements': [],\n", " 'kept_elements': clef_kept_elements,\n", " 'removed_attributes': clef_removed_attributes,\n", " 'kept_attributes': [],\n", "}" ] }, { "cell_type": "code", "execution_count": 168, "metadata": {}, "outputs": [], "source": [ "note_removed_elements = [\n", " \"lyric\",\n", " \"stem\",\n", " \"voice\",\n", " \"beam\",\n", " \"notations\", # Only used for visual clarity\n", " \"instrument\", # We will always use the same instrument\n", " \"notehead\",\n", "]\n", "\n", "note_kept_elements = [\n", " \"pitch\",\n", " \"duration\", # No elements or attributes inside \n", " \"type\",\n", " \"chord\", # No elements or attributes inside \n", " \"accidental\",\n", " \"rest\",\n", " \"dot\",\n", " \"tie\",\n", " \"time-modification\",\n", " \"staff\", # Doesn't really matter if it's kept, used with stave\n", " \"unpitched\",\n", " \"grace\",\n", " \"cue\",\n", "]\n", "\n", "note_removed_attributes = [\n", " \"dynamics\",\n", " \"end-dynamics\"\n", "] + font_array + core_attributes + relative_print\n", "\n", "note_kept_attributes = [\n", " \"release\",\n", " \"attack\",\n", "]\n", "\n", "note_dict = {\n", " 'removed_elements': note_removed_elements,\n", " 'kept_elements': note_kept_elements,\n", " 'removed_attributes': note_removed_attributes,\n", " 'kept_attributes': note_kept_attributes,\n", "}" ] }, { "cell_type": "code", "execution_count": 49, "metadata": {}, "outputs": [], "source": [ "pitch_kept_elements = [\n", " \"step\", # No elements or attributes inside \n", " \"octave\", # No elements or attributes inside \n", "]\n", "\n", "pitch_dict = {\n", " 'removed_elements': [],\n", " 'kept_elements': pitch_kept_elements,\n", " 'removed_attributes': [],\n", " 'kept_attributes': [],\n", "}" ] }, { "cell_type": "code", "execution_count": 63, "metadata": {}, "outputs": [], "source": [ "accidental_removed_attributes = [\n", " \"bracket\",\n", " \"cautionary\",\n", " \"editorial\",\n", " \"parentheses\",\n", " \"size\",\n", " \"smufl\",\n", "] + font_array + core_attributes + relative_print\n", "\n", "accidental_dict = {\n", " 'removed_elements': [],\n", " 'kept_elements': [],\n", " 'removed_attributes': accidental_removed_attributes,\n", " 'kept_attributes': [],\n", "}" ] }, { "cell_type": "code", "execution_count": 67, "metadata": {}, "outputs": [], "source": [ "rest_kept_elements = [\n", " \"display-step\", # No elements or attributes inside \n", " \"display-octave\", # No elements or attributes inside \n", "]\n", "\n", "rest_kept_attributes = [\n", " \"measure\"\n", "]\n", "\n", "rest_dict = {\n", " 'removed_elements': [],\n", " 'kept_elements': rest_kept_elements,\n", " 'removed_attributes': [],\n", " 'kept_attributes': rest_kept_attributes,\n", "}" ] }, { "cell_type": "code", "execution_count": 71, "metadata": {}, "outputs": [], "source": [ "dot_kept_attributes = [\n", " \"placement\"\n", "] + font_array + core_attributes + relative_print\n", "\n", "dot_dict = {\n", " 'removed_elements': [],\n", " 'kept_elements': [],\n", " 'removed_attributes': [],\n", " 'kept_attributes': dot_kept_attributes,\n", "}" ] }, { "cell_type": "code", "execution_count": 76, "metadata": {}, "outputs": [], "source": [ "tie_kept_attributes = [\n", " \"type\",\n", "]\n", "\n", "tie_dict = {\n", " 'removed_elements': [],\n", " 'kept_elements': [],\n", " 'removed_attributes': [],\n", " 'kept_attributes': tie_kept_attributes,\n", "}" ] }, { "cell_type": "code", "execution_count": 108, "metadata": {}, "outputs": [], "source": [ "time_modification_kept_elements = [\n", " \"actual-notes\"\n", " \"normal-notes\"\n", "]\n", "\n", "time_modification_dict = {\n", " 'removed_elements': [],\n", " 'kept_elements': time_modification_kept_elements,\n", " 'removed_attributes': [],\n", " 'kept_attributes': [],\n", "}" ] }, { "cell_type": "code", "execution_count": 52, "metadata": {}, "outputs": [], "source": [ "harmony_removed_attributes = [\n", " \"placement\",\n", " \"print-frame\",\n", " \"system\"\n", "] + font_array + core_attributes + relative_print\n", "\n", "harmony_dict = {\n", " 'removed_elements': [],\n", " 'kept_elements': [],\n", " 'removed_attributes': harmony_removed_attributes,\n", " 'kept_attributes': [],\n", "}" ] }, { "cell_type": "code", "execution_count": 42, "metadata": {}, "outputs": [], "source": [ "def element_cleaner(element: Element, element_dict: dict) -> Element:\n", " for attribute_name in element.attrib:\n", " if attribute_name in element_dict['removed_attributes']:\n", " attribute_deleter(element, attribute_name)\n", " elif attribute_name not in element_dict['kept_attributes']:\n", " raise ValueError(\"Found the attribute: {} in {}\".format(attribute_name, element.tag))\n", " for child in element:\n", " if child.tag in element_dict['removed_elements']:\n", " element_deleter(element, element_name=child.tag)\n", " elif child.tag not in element_dict['kept_elements']:\n", " raise ValueError(\"Found the element: {} in {}\".format(child.tag, element.tag))" ] }, { "cell_type": "code", "execution_count": 149, "metadata": { "id": "Me7rEFjcLebL" }, "outputs": [], "source": [ "def music_xml_cleaner(score_partwise: Element) -> None:\n", " element_cleaner(score_partwise, score_partwise_dict)\n", " element_cleaner(score_partwise, score_partwise_dict)\n", " \n", " for part in score_partwise:\n", " element_cleaner(part, part_dict)\n", " for measure in part:\n", " element_cleaner(measure, measure_dict)\n", " for element in measure:\n", " if element.tag == \"note\":\n", " element_cleaner(element, note_dict)\n", " for note_element in element:\n", " if element.tag == \"pitch\":\n", " element_cleaner(element, pitch_dict)\n", " elif element.tag == \"accidental\":\n", " element_cleaner(element, accidental_dict)\n", " elif element.tag == \"rest\":\n", " element_cleaner(element, rest_dict)\n", " elif element.tag == \"dot\":\n", " element_cleaner(element, dot_dict)\n", " elif element.tag == \"tie\":\n", " element_cleaner(element, tie_dict)\n", " elif element.tag == \"time-modification\":\n", " element_cleaner(element, time_modification_dict)\n", " elif element.tag == \"attributes\":\n", " element_cleaner(element, attributes_dict)\n", " return score_partwise" ] }, { "cell_type": "code", "execution_count": 147, "metadata": {}, "outputs": [], "source": [ "def clean_file(file_name: str) -> None:\n", " file_path = os.path.join('uncompressed_original_files', file_name)\n", " tree: ElementTree = ET.parse(file_path)\n", " root: Element = tree.getroot()\n", " cleaned_root = music_xml_cleaner(root)\n", " cleaned_root = music_xml_cleaner(cleaned_root)\n", " cleaned_root = music_xml_cleaner(cleaned_root)\n", " cleaned_tree = ET.ElementTree(cleaned_root)\n", " cleaned_file_path = os.path.join('cleaned_musicxml_files', file_name)\n", " cleaned_tree.write(cleaned_file_path, encoding='utf-8', xml_declaration=True)" ] }, { "cell_type": "code", "execution_count": 171, "metadata": {}, "outputs": [], "source": [ "if not os.path.exists('cleaned_musicxml_files'):\n", " os.makedirs('cleaned_musicxml_files')\n", "\n", "for file_name in os.listdir('uncompressed_original_files'):\n", " if not os.path.exists(os.path.join(\"cleaned_musicxml_files\", file_name)):\n", " print(file_name)\n", " clean_file(file_name)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Split the file into a folder with each file being a single part\n", "- Discard parts that have more then one attribute\n", "- Discard parts that have `` in the atribute" ] }, { "cell_type": "code", "execution_count": 230, "metadata": {}, "outputs": [], "source": [ "def splitter(root: Element) -> List[Element]:\n", " parts: List[Element] = []\n", " for part in root:\n", " attributes_counter = 0\n", " attribute_save = None\n", " for measure in part:\n", " attributes_counter += len(measure.findall(\"attributes\"))\n", " if len(measure.findall(\"attributes\")) > 0:\n", " attribute_save = measure.find(\"attributes\")\n", " if attributes_counter == 1 and attribute_save.find(\"staves\") == None:\n", " parts.append(part)\n", " return parts" ] }, { "cell_type": "code", "execution_count": 231, "metadata": {}, "outputs": [], "source": [ "def split_file(file_name: str):\n", " file_path = os.path.join('cleaned_musicxml_files', file_name)\n", " tree: ElementTree = ET.parse(file_path)\n", " root: Element = tree.getroot()\n", " parts: List[Element] = splitter(root)\n", " if len(parts) > 0:\n", " if not os.path.exists(os.path.join('part_split_files', file_name)):\n", " os.makedirs(os.path.join('part_split_files', file_name))\n", " for index, part in enumerate(parts):\n", " part_tree = ET.ElementTree(part)\n", " part_tree_path = os.path.join('part_split_files', file_name, 'part_{}.musicxml'.format(index))\n", " part_tree.write(part_tree_path, encoding='utf-8', xml_declaration=True)" ] }, { "cell_type": "code", "execution_count": 235, "metadata": {}, "outputs": [], "source": [ "if not os.path.exists('part_split_files'):\n", " os.makedirs('part_split_files')\n", "\n", "for file_name in os.listdir('cleaned_musicxml_files'):\n", " if not os.path.exists(os.path.join(\"part_split_files\", file_name)):\n", " # print(file_name)\n", " split_file(file_name)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Convert each part to YAML and split to keep the file under the 4k context length" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "from transformers import LlamaTokenizerFast\n", "import xmltodict\n", "import yaml\n", "import math\n", "from typing import Any, Dict" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "def xml_to_dict(xml_file: str):\n", " with open(xml_file, 'r') as f:\n", " xml_data = f.read()\n", " xml_dict = xmltodict.parse(xml_data)\n", " return xml_dict\n", "\n", "def dict_to_yaml(input_dict: Dict[str, Any], yaml_file: str):\n", " with open(yaml_file, 'w') as f:\n", " yaml.dump(input_dict, f, default_flow_style=False)" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "tokenizer = LlamaTokenizerFast.from_pretrained(\"hf-internal-testing/llama-tokenizer\")" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "with open(\"temp.yaml\", \"w\"):\n", " pass" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "source_folder = 'part_split_files'\n", "training_yaml_folder = \"training_yaml\"\n", "temp_file = 'temp.yaml'\n", "max_safe_context_length = 3500" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [], "source": [ "for song in os.listdir(source_folder):\n", " song_name = os.path.splitext(song)[0].strip()\n", " if os.path.exists(os.path.join(training_yaml_folder, song_name)):\n", " continue\n", " print(song)\n", " for part in os.listdir(os.path.join(source_folder, song)):\n", " part_dict = xml_to_dict(os.path.join(source_folder, song, part))\n", " dict_to_yaml(part_dict, temp_file)\n", "\n", " with open(temp_file, 'r') as file:\n", " raw_text = file.read()\n", " token_length = len(tokenizer.encode(raw_text))\n", "\n", " instruction_dict = part_dict['part']['measure'][0]['attributes']\n", "\n", " del part_dict['part']['measure'][0]['attributes']\n", "\n", " measures = part_dict['part']['measure']\n", "\n", " # Split into chucnks to keep below the token limit\n", " chunks_count = math.ceil(token_length / max_safe_context_length)\n", " chunk_size = len(measures) // chunks_count\n", " remainder = len(measures) % chunks_count\n", "\n", " chunks = []\n", " start = 0\n", "\n", " for chunk in range(chunks_count):\n", " chunk_length = chunk_size + (1 if chunk < remainder else 0) # Adjust chunk length for remainder\n", " chunks.append(measures[start:start + chunk_length])\n", " start += chunk_length\n", " \n", " for index, chunk in enumerate(chunks):\n", " input_dict = chunk[:len(chunk)//2]\n", " output_dict = chunk[len(chunk)//2:]\n", "\n", " # We have the instruction, input, and output dicts, so now turn each into yaml and save to files.\n", " song_name = os.path.splitext(song)[0].strip().strip('.')\n", " part_name = os.path.splitext(part)[0]\n", " chunk_name = \"chunk_{}\".format(index)\n", " data_location = os.path.join(training_yaml_folder, song_name, part_name, chunk_name)\n", " os.makedirs(data_location)\n", "\n", " instruction_file_name = \"instruction.yaml\".format(index)\n", " instruction_file_locaiton = os.path.join(data_location, instruction_file_name)\n", " with open(instruction_file_locaiton, 'w'):\n", " pass \n", " dict_to_yaml(instruction_dict, instruction_file_locaiton)\n", "\n", " input_file_name = \"input.yaml\".format(index)\n", " input_file_locaiton = os.path.join(data_location, input_file_name)\n", " with open(input_file_locaiton, 'w'):\n", " pass \n", " dict_to_yaml(input_dict, input_file_locaiton)\n", "\n", " output_file_name = \"output.yaml\".format(index)\n", " output_file_locaiton = os.path.join(data_location, output_file_name)\n", " with open(output_file_locaiton, 'w'):\n", " pass \n", " dict_to_yaml(output_dict, output_file_locaiton)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Convert to JSON Lines format\n", "We need to store the data in JSON Lines format so it can be saved to Hugging Face" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import jsonlines" ] }, { "cell_type": "code", "execution_count": 24, "metadata": {}, "outputs": [], "source": [ "def generate_entry(path: str):\n", " entry = {}\n", "\n", " input_file = os.path.join(path, \"input.yaml\")\n", " output_file = os.path.join(path, \"output.yaml\")\n", " instruction_file = os.path.join(path, \"instruction.yaml\")\n", "\n", " with open(instruction_file, 'r') as file:\n", " # Read the contents of the file\n", " file_contents = file.read()\n", " entry['instruction'] = file_contents\n", "\n", " with open(input_file, 'r') as file:\n", " # Read the contents of the file\n", " file_contents = file.read()\n", " entry['input'] = file_contents\n", "\n", " with open(output_file, 'r') as file:\n", " # Read the contents of the file\n", " file_contents = file.read()\n", " entry['output'] = file_contents\n", "\n", " return entry" ] }, { "cell_type": "code", "execution_count": 26, "metadata": {}, "outputs": [], "source": [ "source_folder = \"training_yaml\"\n", "\n", "with jsonlines.open('cord_llama_data.jsonl', mode='a') as writer:\n", " for song in os.listdir(source_folder):\n", " for part in os.listdir(os.path.join(source_folder, song)):\n", " for chunk in os.listdir(os.path.join(source_folder, song, part)):\n", " entry = generate_entry(os.path.join(source_folder, song, part, chunk))\n", " writer.write(entry)" ] } ], "metadata": { "colab": { "provenance": [] }, "kernelspec": { "display_name": "Python 3", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.11" } }, "nbformat": 4, "nbformat_minor": 0 }