import ast
import glob
import os
import re

import gradio as gr
import imgkit
import google.cloud.texttospeech as tts
from google.oauth2 import service_account
from langchain import LLMChain, PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferWindowMemory
from moviepy.editor import ImageSequenceClip, AudioFileClip, concatenate_videoclips

# The OpenAI API key is read from the environment and passed explicitly to the chat models below.
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']


# Example of the python list format the model is asked to return: five slides,
# each a dict with a 'header' string and a 'paragraphs' list of bullet points.
dict_list_format = '''[{'header': 'slide1_title',
  'paragraphs': ['bullet_point1',
   'bullet_point2',
   'bullet_point3',
   ...]},
 {'header': 'slide2_title',
  'paragraphs': ['bullet_point1',
   'bullet_point2',
   'bullet_point3',
   ...]},
 {'header': 'slide3_title',
  'paragraphs': ['bullet_point1',
   'bullet_point2',
   'bullet_point3',
   ...]},
 {'header': 'slide4_title',
  'paragraphs': ['bullet_point1',
   'bullet_point2',
   'bullet_point3',
   ...]},
 {'header': 'slide5_title',
  'paragraphs': ['bullet_point1',
   'bullet_point2',
   'bullet_point3',
   ...]}]
'''

# Google Cloud Text-to-Speech credentials (service account key file expected alongside the script).
credentials = service_account.Credentials.from_service_account_file("tts_google.json")

def text_to_wav(voice_name: str, text: str, file_name: str):
    """Synthesize `text` with Google Cloud TTS and write it to `file_name` as a LINEAR16 WAV."""
    language_code = "-".join(voice_name.split("-")[:2])
    text_input = tts.SynthesisInput(text=text)
    voice_params = tts.VoiceSelectionParams(
        language_code=language_code, name=voice_name
    )
    audio_config = tts.AudioConfig(audio_encoding=tts.AudioEncoding.LINEAR16)

    client = tts.TextToSpeechClient(credentials=credentials)
    response = client.synthesize_speech(
        input=text_input,
        voice=voice_params,
        audio_config=audio_config,
    )

    with open(file_name, "wb") as out:
        out.write(response.audio_content)
    print(f'Generated speech saved to "{file_name}"')
        


def prompt_to_video(video_prompt):
    """Generate a narrated slideshow video about `video_prompt` and return the mp4 path.

    Pipeline: prompt -> 5-slide outline (GPT-4) -> HTML slides (GPT-3.5-turbo)
    -> JPG renders (wkhtmltoimage) -> per-slide voiceover WAVs (Google TTS) -> MP4 (moviepy).
    """
    
    # First chain: ask GPT-4 to draft the 5-slide outline as a python list of dicts.
    template = '''
    {history}
    {human_input}
    '''
    prompt = PromptTemplate(
        input_variables=["history", "human_input"], 
        template=template
    )

    chatgpt_chain = LLMChain(
        llm=ChatOpenAI(model="gpt-4", temperature=0.5,openai_api_key=OPENAI_API_KEY), 
        prompt=prompt, 
        verbose=True, 
        memory=ConversationBufferWindowMemory(k=10),
    )

    prompt_input1 = f'''
    You are a world expert oracle that knows everything.
    You are also an excellent teacher that explains everything succinctly and simply, as if to a kid.
    You are also an expert slide maker and think about everything step by step.
    You are tasked to create 5 slides today.

    Here is the topic:
    {video_prompt}

    Here is the output python list format:
    {dict_list_format}

    The slides should be created in a python list format.
    The list consists of python dictionary objects.
    Each dictionary object contains the header and paragraphs as keys.
    Do not name the slide as "Slide 1" or any number.  Insert the header as a header string.
    The header is the title of the slide and the paragraphs should be a list of string objects.
    Return the output in a python list format.
    Make sure there are only 5 objects in the python list.
    Do not declare a new variable, output the python list object only.
    Do not say "Here's your".  Directly output the python list object only.
    Make sure there is nothing before or after the python list object.  ONLY output the python list object.
    '''
    slide_str_list = []
    while len(slide_str_list) != 5:
        slide_dict = chatgpt_chain.predict(human_input=prompt_input1)
        try:
            # The model returns a string; parse it into a python list of slide dicts.
            slide_str_list = ast.literal_eval(slide_dict)
        except (ValueError, SyntaxError):
            print("Could not parse the model output as a python list, retrying...")
        
    
    print("this is the slides:", slide_str_list)
    print("length is:", len(slide_str_list))
    
    # Second chain: convert each slide dict into a standalone 16:9 HTML slide.
    html_out_list = []
    for i in slide_str_list:
        
        template = '''
        {history}
        {human_input}
        '''
        prompt = PromptTemplate(
            input_variables=["history", "human_input"], 
            template=template
        )

        chatgpt_chain = LLMChain(
            llm=ChatOpenAI(model="gpt-3.5-turbo", temperature=0,openai_api_key=OPENAI_API_KEY), 
            prompt=prompt, 
            verbose=True, 
            memory=ConversationBufferWindowMemory(k=10),
        )
        prompt_input2 = f'''
        You are a world expert oracle that knows everything.
        You are also an excellent teacher that explains everything succinctly and simply, as if to a kid.
        You are also an expert slide maker and think about everything step by step.
        You are tasked to convert a python dictionary into formatted HTML code.
        The dictionary object consists of the header and paragraphs keys.
        The paragraphs key is a list of strings.
        Here is the dictionary object:
        {i}
        The slide should be created in HTML format with a correct 16:9 aspect ratio.
        The wording of the slide should be formatted appropriately with the header and paragraphs.
        The paragraphs in the slide should be formatted as bullet points and each bullet point should be 1.5 line spacing apart.
        The header and paragraphs should be aligned in an aesthetically pleasing way.
        Return the output as a nicely formatted HTML string.
        The header should be aligned to the center.
        Font color should be white and the background black.
        Font should be Roboto.
        Do not say "Here's your" or "Sure".  Directly output the HTML string only.
        Make sure there is nothing before or after the HTML string.  ONLY output the HTML string.
        Do not explain what the HTML code is about.
        Do not declare a new variable, output the HTML string only.
        '''
        html_out_list.append(chatgpt_chain.predict(human_input=prompt_input2))

    # Write each slide's HTML to its own file inside a per-prompt directory.
    extract_path = 'slide_' + video_prompt
    os.makedirs(extract_path, exist_ok=True)

    num = 1
    for html_string in html_out_list:
        print(html_string)
        with open(f"{extract_path}/slide_{num}.html", "w") as file:
            file.write(html_string)
        num = num + 1

    # Configuration for imgkit (assumes the wkhtmltoimage binary is installed at /bin/wkhtmltoimage)
    config = imgkit.config(wkhtmltoimage='/bin/wkhtmltoimage')

    # The path to store the images
    image_path = os.path.join(extract_path, 'images')
    os.makedirs(image_path, exist_ok=True)

    # Get the list of HTML files
    html_files = sorted([f for f in os.listdir(extract_path) if f.endswith('.html')])

    # Dictionary to store the file names and their corresponding images
    file_images = {}

    # Loop through the HTML files and convert them to images
    for html_file in html_files:
        # Full path of the HTML file
        full_path = os.path.join(extract_path, html_file)

        # Image file name
        image_file = re.sub('.html$', '.jpg', html_file)

        # Full path of the image file
        full_image_path = os.path.join(image_path, image_file)

        # Convert the HTML to an image
        imgkit.from_file(full_path, full_image_path, config=config)

        # Store the image file name
        file_images[html_file] = image_file

    print(file_images)
    
    # Third chain: ask GPT-4 to write a short voiceover script for each slide.
    template = '''
    {history}
    {human_input}
    '''
    prompt = PromptTemplate(
        input_variables=["history", "human_input"], 
        template=template
    )

    chatgpt_chain = LLMChain(
        llm=ChatOpenAI(model="gpt-4", temperature=0.5,openai_api_key=OPENAI_API_KEY), 
        prompt=prompt, 
        verbose=True, 
        memory=ConversationBufferWindowMemory(k=10),
    )
    
    prompt_input3 = f'''
    You are a world expert oracle that knows everything.
    You are also an excellent teacher that explains everything succinctly and simply, as if to a kid.
    You are an expert orator and presenter.
    You are tasked to create a voiceover for 5 slides.
    The slides are formatted in a python list of dictionary objects.
    Each dictionary object is a slide.
    {slide_str_list}

    Input: Python list of dictionary objects
    Output: Python list of string objects

    The output list consists of string objects.
    The voiceover text is meant to be spoken as a presentation of the slide.
    The voiceover text should cover the content of each slide, but at the same time add additional information to make the presentation funny and engaging.
    Each string is the voiceover text for one slide in the python list.
    Each voiceover string object should be around 80 words.

    Make sure there are only 5 objects in the python list.
    Do not declare a new variable, output the python list object only.
    Make sure there is nothing before or after the python list object.  ONLY output the python list object.
    Return the output in a python list format.
    Do not say "Here's your" or "Sure".  Directly output the python list of string objects only.
    Do not declare a new variable, output the python list of string objects only.
    '''
    
    voiceover_response = chatgpt_chain.predict(human_input=prompt_input3)

    try:
        voiceover_list = ast.literal_eval(voiceover_response)
    except (ValueError, SyntaxError):
        print("Could not parse the voiceover output as a python list.")
        voiceover_list = []

    # Synthesize one WAV per slide into the same per-prompt directory so the audio
    # can be paired with the rendered slide images below.
    num = 1
    for i in voiceover_list:
        file_name = f"{extract_path}/slide_{num}.wav"
        text_to_wav("en-US-Neural2-F", i, file_name)
        print(file_name)
        num = num + 1
    
    
    # Collect the rendered slide images and the per-slide voiceover audio, in slide order.
    jpg_files = sorted(glob.glob(f"{extract_path}/images/*.jpg"))
    wav_files = sorted(glob.glob(f"{extract_path}/*.wav"))

    # Create a list to store the clips
    clips = []

    # Loop through each jpg and wav file
    for jpg_file, wav_file in zip(jpg_files, wav_files):
        # Load the audio file and get its duration
        audio = AudioFileClip(wav_file)
        duration = audio.duration
        print(duration)

        # Calculate the frame rate as the inverse of the duration
        fps = 1 / duration if duration != 0 else 1

        # Create a video clip from the image and set its duration and fps to match the audio
        clip = ImageSequenceClip([jpg_file], durations=[duration], fps=fps)

        # Set the audio of the clip to the wav file
        clip = clip.set_audio(audio)

        # Add the clip to the list of clips
        clips.append(clip)

    # Concatenate all clips into a single video
    video = concatenate_videoclips(clips)
    
    video_path = f"{extract_path}/output.mp4"

    # Write the video to a file
    video.write_videofile(video_path)
    
    
    return video_path

iface = gr.Interface(
    fn=prompt_to_video, 
    inputs="text", 
    outputs=["file"],
    title="Prompt to Video Tutorial",
    description="Create a video tutorial to learn about anything!")

iface.launch()
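
# A minimal sketch of an alternative launch call, assuming a shareable public link is wanted
# (Gradio supports this via the `share` flag):
# iface.launch(share=True)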