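"""Text-cleaning pipelines that convert raw text into phoneme-like strings.

Each cleaner normalizes text for one language (or a tagged mixture of
languages) into romaji, bopomofo, hangul jamo or IPA, and appends a
sentence-final punctuation mark when one is missing.
"""
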
import re


def japanese_cleaners(text):
    '''Pipeline for Japanese text'''
    from text.japanese import japanese_to_romaji_with_accent
    text = japanese_to_romaji_with_accent(text)
    text = re.sub(r'([A-Za-z])$', r'\1.', text)
    return text


def japanese_cleaners2(text):
    return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…')


def korean_cleaners(text):
    '''Pipeline for Korean text'''
    from text.korean import latin_to_hangul, number_to_hangul, divide_hangul
    text = latin_to_hangul(text)
    text = number_to_hangul(text)
    text = divide_hangul(text)
    text = re.sub(r'([\u3131-\u3163])$', r'\1.', text)
    return text


def chinese_cleaners(text):
    '''Pipeline for Chinese text'''
    from text.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo
    text = number_to_chinese(text)
    text = chinese_to_bopomofo(text)
    text = latin_to_bopomofo(text)
    text = re.sub(r'([ˉˊˇˋ˙])$', r'\1。', text)
    return text
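

# The multilingual cleaners below operate on language-tagged segments such as
# [ZH]...[ZH], [JA]...[JA], [KO]...[KO], [SA]...[SA] and [EN]...[EN]. Each
# tagged segment is converted independently, the results are concatenated with
# single spaces, and a trailing '.' is appended when the cleaned text does not
# already end in punctuation.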


def zh_ja_mixture_cleaners(text):
    '''Pipeline for mixed Chinese/Japanese text'''
    from text.mandarin import chinese_to_romaji
    from text.japanese import japanese_to_romaji_with_accent
    text = re.sub(r'\[ZH\](.*?)\[ZH\]',
                  lambda x: chinese_to_romaji(x.group(1))+' ', text)
    text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_romaji_with_accent(
        x.group(1)).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…')+' ', text)
    text = re.sub(r'\s+$', '', text)
    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
    return text


def sanskrit_cleaners(text):
    '''Pipeline for Sanskrit (Devanagari) text'''
    text = text.replace('॥', '।').replace('ॐ', 'ओम्')
    if text[-1] != '।':
        text += ' ।'
    return text


def cjks_cleaners(text):
    '''Pipeline for mixed Chinese/Japanese/Korean/Sanskrit/English text'''
    from text.mandarin import chinese_to_lazy_ipa
    from text.japanese import japanese_to_ipa
    from text.korean import korean_to_lazy_ipa
    from text.sanskrit import devanagari_to_ipa
    from text.english import english_to_lazy_ipa
    text = re.sub(r'\[ZH\](.*?)\[ZH\]',
                  lambda x: chinese_to_lazy_ipa(x.group(1))+' ', text)
    text = re.sub(r'\[JA\](.*?)\[JA\]',
                  lambda x: japanese_to_ipa(x.group(1))+' ', text)
    text = re.sub(r'\[KO\](.*?)\[KO\]',
                  lambda x: korean_to_lazy_ipa(x.group(1))+' ', text)
    text = re.sub(r'\[SA\](.*?)\[SA\]',
                  lambda x: devanagari_to_ipa(x.group(1))+' ', text)
    text = re.sub(r'\[EN\](.*?)\[EN\]',
                  lambda x: english_to_lazy_ipa(x.group(1))+' ', text)
    text = re.sub(r'\s+$', '', text)
    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
    return text


def cjke_cleaners(text):
    '''Pipeline for mixed Chinese/Japanese/Korean/English text'''
    from text.mandarin import chinese_to_lazy_ipa
    from text.japanese import japanese_to_ipa
    from text.korean import korean_to_ipa
    from text.english import english_to_ipa2
    text = re.sub(r'\[ZH\](.*?)\[ZH\]', lambda x: chinese_to_lazy_ipa(x.group(1)).replace(
        'ʧ', 'tʃ').replace('ʦ', 'ts').replace('ɥan', 'ɥæn')+' ', text)
    text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_ipa(x.group(1)).replace('ʧ', 'tʃ').replace(
        'ʦ', 'ts').replace('ɥan', 'ɥæn').replace('ʥ', 'dz')+' ', text)
    text = re.sub(r'\[KO\](.*?)\[KO\]',
                  lambda x: korean_to_ipa(x.group(1))+' ', text)
    text = re.sub(r'\[EN\](.*?)\[EN\]', lambda x: english_to_ipa2(x.group(1)).replace('ɑ', 'a').replace(
        'ɔ', 'o').replace('ɛ', 'e').replace('ɪ', 'i').replace('ʊ', 'u')+' ', text)
    text = re.sub(r'\s+$', '', text)
    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
    return text


def cjke_cleaners2(text):
    '''Pipeline for mixed Chinese/Japanese/Korean/English text (alternative IPA mapping)'''
    from text.mandarin import chinese_to_ipa
    from text.japanese import japanese_to_ipa2
    from text.korean import korean_to_ipa
    from text.english import english_to_ipa2
    text = re.sub(r'\[ZH\](.*?)\[ZH\]',
                  lambda x: chinese_to_ipa(x.group(1))+' ', text)
    text = re.sub(r'\[JA\](.*?)\[JA\]',
                  lambda x: japanese_to_ipa2(x.group(1))+' ', text)
    text = re.sub(r'\[KO\](.*?)\[KO\]',
                  lambda x: korean_to_ipa(x.group(1))+' ', text)
    text = re.sub(r'\[EN\](.*?)\[EN\]',
                  lambda x: english_to_ipa2(x.group(1))+' ', text)
    text = re.sub(r'\s+$', '', text)
    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
    return text


def thai_cleaners(text):
    '''Pipeline for Thai text'''
    from text.thai import num_to_thai, latin_to_thai
    text = num_to_thai(text)
    text = latin_to_thai(text)
    return text


def shanghainese_cleaners(text):
    '''Pipeline for Shanghainese text'''
    from text.shanghainese import shanghainese_to_ipa
    text = shanghainese_to_ipa(text)
    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
    return text


def chinese_dialect_cleaners(text):
    '''Pipeline for mixed Chinese-dialect (Mandarin, Shanghainese, Cantonese and other tagged dialects), Japanese and English text'''
    from text.mandarin import chinese_to_ipa2
    from text.japanese import japanese_to_ipa3
    from text.shanghainese import shanghainese_to_ipa
    from text.cantonese import cantonese_to_ipa
    from text.english import english_to_lazy_ipa2
    from text.ngu_dialect import ngu_dialect_to_ipa
    text = re.sub(r'\[ZH\](.*?)\[ZH\]',
                  lambda x: chinese_to_ipa2(x.group(1))+' ', text)
    text = re.sub(r'\[JA\](.*?)\[JA\]',
                  lambda x: japanese_to_ipa3(x.group(1)).replace('Q', 'ʔ')+' ', text)
    text = re.sub(r'\[SH\](.*?)\[SH\]', lambda x: shanghainese_to_ipa(x.group(1)).replace('1', '˥˧').replace('5',
                  '˧˧˦').replace('6', '˩˩˧').replace('7', '˥').replace('8', '˩˨').replace('ᴀ', 'ɐ').replace('ᴇ', 'e')+' ', text)
    text = re.sub(r'\[GD\](.*?)\[GD\]',
                  lambda x: cantonese_to_ipa(x.group(1))+' ', text)
    text = re.sub(r'\[EN\](.*?)\[EN\]',
                  lambda x: english_to_lazy_ipa2(x.group(1))+' ', text)
    text = re.sub(r'\[([A-Z]{2})\](.*?)\[\1\]', lambda x: ngu_dialect_to_ipa(x.group(2), x.group(
        1)).replace('ʣ', 'dz').replace('ʥ', 'dʑ').replace('ʦ', 'ts').replace('ʨ', 'tɕ')+' ', text)
    text = re.sub(r'\s+$', '', text)
    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
    return text
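

if __name__ == '__main__':
    # Minimal smoke test (illustrative only): the sample inputs are assumptions,
    # and running this requires the text.* helper modules imported above
    # together with their own dependencies.
    print(japanese_cleaners('こんにちは'))
    print(cjke_cleaners2('[ZH]你好[ZH] [EN]Hello[EN]'))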