wvangils committed on
Commit c236965
1 Parent(s): a19ee79

Code for generation of text and image

Files changed (1): app.py (+92, -0)
app.py ADDED
@@ -0,0 +1,92 @@
+ from transformers import pipeline
+ import gradio as gr
+ import re
+ import torch
+ from torch import autocast
+ from diffusers import StableDiffusionPipeline
+
+ # Available models for pipeline
+ # checkpoint = 'wvangils/CTRL-Beatles-Lyrics-finetuned-newlyrics'
+ # checkpoint = 'wvangils/GPT-Medium-Beatles-Lyrics-finetuned-newlyrics'
+ # checkpoint = 'wvangils/GPT-Neo-125m-Beatles-Lyrics-finetuned-newlyrics'
+ # checkpoint = 'wvangils/GPT2-Beatles-Lyrics-finetuned-newlyrics'
+ # checkpoint = 'wvangils/DistilGPT2-Beatles-Lyrics-finetuned-newlyrics'
+ # checkpoint = 'wvangils/BLOOM-350m-Beatles-Lyrics-finetuned-newlyrics'
+
+ checkpoint_choices = ['wvangils/GPT-Medium-Beatles-Lyrics-finetuned-newlyrics', 'wvangils/GPT-Neo-125m-Beatles-Lyrics-finetuned-newlyrics', 'wvangils/BLOOM-560m-Beatles-Lyrics-finetuned']
+
+ # Create function for generation
+
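+ # Generates lyrics from the prompt, proposes a title via a summarization model and renders a cover image with Stable Diffusion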
+ def generate_beatles(checkpoint, input_prompt, temperature, top_p):
+
+     # Create a text-generation pipeline for the selected model
+
+     generator = pipeline("text-generation", model=checkpoint)
+     generated_lyrics = generator(input_prompt
+                                  , max_length = 50
+                                  , num_return_sequences = 1
+                                  , return_full_text = True
+                                  , verbose = 0
+                                  #, num_beams = 1
+                                  #, early_stopping = True # Doesn't seem to work well
+                                  , temperature = temperature
+                                  #, top_k = 50 # Default 50
+                                  , top_p = top_p # Default 1.0
+                                  , no_repeat_ngram_size = 3 # Default = 0
+                                  , repetition_penalty = 1.0 # Default = 1.0
+                                  #, do_sample = True # Default = False
+                                  )[0]["generated_text"]
+
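+     # Turn the newline-separated lyrics into sentence-like text and summarize them into a short song title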
+     lyrics_sentences = re.sub('\n', '. ', generated_lyrics)
+     title_generator = pipeline('summarization', model='czearing/story-to-title')
+     title = title_generator(lyrics_sentences, min_length=1, max_length=10, repetition_penalty=2.5)[0]['summary_text']
+
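+     # Load Stable Diffusion in half precision to turn the proposed title into a cover image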
+     pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4"
+                                                     , revision="fp16"
+                                                     , torch_dtype=torch.float16
+                                                     , use_auth_token=True) # use the locally stored Hugging Face token, never hard-code one
+
+     # Move the pipeline to the GPU
+     pipe = pipe.to("cuda")
+
+     # Set a seed for reproducibility
+     seed = torch.Generator("cuda").manual_seed(42)
+     image_input = "Oil painting for " + title
+
+     # Generate the image in [PIL format](https://pillow.readthedocs.io/en/stable/)
+     with autocast("cuda"):
+         image = pipe(image_input, num_inference_steps=50, generator=seed, guidance_scale=8.5)["sample"][0]
+
+     return (title, generated_lyrics, image)
+
+ # Create textboxes for input and output
+ input_box = gr.Textbox(label="Input prompt:", placeholder="Write the start of a song here", value="In my dreams I am", lines=2, max_lines=5)
+ gen_lyrics = gr.Textbox(label="Beatles-like lyrics from the chosen language model:", lines=15)
+ gen_title = gr.Textbox(label="Proposed song title", lines=1)
+ gen_image = gr.Image(label="Proposed song cover", type="pil")
+
+ # Title and description text shown above the app
+ title='Beatles lyrics generator'
+ description="<p style='text-align: center'>Multiple language models were fine-tuned on lyrics from The Beatles to generate Beatles-like text. Give it a try!</p>"
+ article="""<p style='text-align: left'>A couple of data scientists working for <a href='https://cmotions.nl/' target="_blank">Cmotions</a> came together to construct a text generation model that outputs Beatles-like text.
+ We tried several text generation models that we were able to load in Colab: a general <a href='https://huggingface.co/gpt2-medium' target='_blank'>GPT2-medium</a> model, the Eleuther AI small-sized GPT model <a href='https://huggingface.co/EleutherAI/gpt-neo-125M' target='_blank'>GPT-Neo</a> and the new kid on the block built by the <a href='https://bigscience.notion.site/BLOOM-BigScience-176B-Model-ad073ca07cdf479398d5f95d88e218c4' target='_blank'>BigScience</a> initiative, <a href='https://huggingface.co/bigscience/bloom-350m' target='_blank'>BLOOM 350m</a>.
+ Furthermore, we've put together a <a href='https://huggingface.co/datasets/cmotions/Beatles_lyrics' target='_blank'>Huggingface dataset</a> containing all known lyrics created by The Beatles. <a href='https://www.theanalyticslab.nl/blogs/' target='_blank'>Read this blog</a> to see how this model was built in a Python notebook using Huggingface.
+ The default output contains at most 50 tokens and uses a repetition penalty of 1.0.
+ </p>"""
+
+ # Let users select their own temperature and top-p
+ temperature = gr.Slider(minimum=0.1, maximum=1.0, step=0.1, label="Temperature (higher = more likely to sample low-probability tokens)", value=0.7, show_label=True)
+ top_p = gr.Slider(minimum=0.1, maximum=1.0, step=0.1, label="Top-p (sample the next word only from tokens within cumulative probability p)", value=0.5, show_label=True)
+ checkpoint = gr.Radio(checkpoint_choices, value='wvangils/GPT-Medium-Beatles-Lyrics-finetuned-newlyrics', interactive=True, label='Select fine-tuned model', show_label=True)
+
+ # Wire the generation function into a Gradio demo app
+ gr.Interface(fn=generate_beatles
+              , inputs=[checkpoint, input_box, temperature, top_p]
+              , outputs=[gen_title, gen_lyrics, gen_image]
+              #, examples=examples # output is not very fancy as you have to specify all inputs for every example
+              , title=title
+              , description=description
+              , article=article
+              , allow_flagging='never'
+              ).launch()
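
For local testing, the generation function could also be called directly without the Gradio interface. A minimal sketch (not part of app.py, assuming a CUDA GPU and access to the listed checkpoints):

    # Hypothetical direct call for local testing; not part of the committed file
    song_title, lyrics, cover = generate_beatles(
        checkpoint='wvangils/GPT-Medium-Beatles-Lyrics-finetuned-newlyrics',
        input_prompt='In my dreams I am',
        temperature=0.7,
        top_p=0.5)
    cover.save('cover.png')  # the third return value is a PIL image
    print(song_title)
    print(lyrics)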