123LETSPLAY committed on
Commit
e820640
1 Parent(s): c920c24

Create requirements.txt

Browse files
Files changed (1) hide show
  1. requirements.txt +5 -21
requirements.txt CHANGED
@@ -1,21 +1,5 @@
1
- from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
2
- from PIL import Image
3
-
4
- # Load the pre-trained model and processor
5
- model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
6
- processor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
7
- tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
8
-
9
- # Load an image
10
- image_path = "path/to/your/image.jpg" # Update with your image path
11
- image = Image.open(image_path)
12
-
13
- # Process the image
14
- pixel_values = processor(images=image, return_tensors="pt").pixel_values
15
-
16
- # Generate text
17
- output_ids = model.generate(pixel_values)
18
- text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
19
-
20
- # Print the extracted text
21
- print(text)
 
1
+ streamlit
2
+ transformers
3
+ torch
4
+ torchvision
5
+ Pillow