mbayo97 committed
Commit ae9ee81
1 Parent(s): cd8208d

Upload 4 files

Files changed (4):
  1. .gitattributes +1 -4
  2. README.md +5 -6
  3. app.py +61 -0
  4. requirements.txt +4 -0
.gitattributes CHANGED
@@ -2,13 +2,11 @@
  *.arrow filter=lfs diff=lfs merge=lfs -text
  *.bin filter=lfs diff=lfs merge=lfs -text
  *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
  *.ftz filter=lfs diff=lfs merge=lfs -text
  *.gz filter=lfs diff=lfs merge=lfs -text
  *.h5 filter=lfs diff=lfs merge=lfs -text
  *.joblib filter=lfs diff=lfs merge=lfs -text
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
  *.model filter=lfs diff=lfs merge=lfs -text
  *.msgpack filter=lfs diff=lfs merge=lfs -text
  *.npy filter=lfs diff=lfs merge=lfs -text
@@ -16,13 +14,12 @@
  *.onnx filter=lfs diff=lfs merge=lfs -text
  *.ot filter=lfs diff=lfs merge=lfs -text
  *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
  *.pickle filter=lfs diff=lfs merge=lfs -text
  *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
  *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.tar.* filter=lfs diff=lfs merge=lfs -text
  *.tflite filter=lfs diff=lfs merge=lfs -text
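For reference, each of these entries is the attribute line that `git lfs track` writes for a pattern (e.g. `git lfs track "*.pb"` appends exactly `*.pb filter=lfs diff=lfs merge=lfs -text`), so this diff amounts to untracking *.ckpt, *.mlmodel, and *.safetensors from LFS and moving the *.pb entry further down the list.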
README.md CHANGED
@@ -1,13 +1,12 @@
  ---
- title: BeautyRate
- emoji: 🐨
- colorFrom: pink
- colorTo: red
+ title: Hot Or Not
+ emoji: 🏢
+ colorFrom: yellow
+ colorTo: gray
  sdk: gradio
- sdk_version: 3.29.0
+ sdk_version: 3.4.1
  app_file: app.py
  pinned: false
- license: openrail
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
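One detail worth noting: the new front matter pins sdk_version: 3.4.1, and app.py below uses the older gr.inputs.* constructors, which is consistent with that 3.x Gradio API (those constructors were removed in Gradio 4).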
app.py ADDED
@@ -0,0 +1,61 @@
+ import torch
+ import clip
+ from PIL import Image
+ import gradio as gr
+
+ # Load CLIP once at startup; fall back to CPU when no GPU is available.
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model, preprocess = clip.load("ViT-B/32", device=device)
+
+ def hotornot(image, gender):
+     # Gradio passes the image in as a numpy array; convert it to PIL so
+     # CLIP's preprocessing pipeline can handle it.
+     image = Image.fromarray(image.astype("uint8"), "RGB")
+     image = preprocess(image).unsqueeze(0).to(device)
+
+     # Each positive prompt is paired with a negative counterpart, and the
+     # image is scored on every pair independently.
+     positive_terms = [f'a hot {gender}', f'a beautiful {gender}', f'an attractive {gender}']
+     negative_terms = [f'a gross {gender}', f'an ugly {gender}', f'a hideous {gender}']
+     pairs = list(zip(positive_terms, negative_terms))
+
+     def evaluate(terms):
+         text = clip.tokenize(terms).to(device)
+         with torch.no_grad():
+             logits_per_image, logits_per_text = model(image, text)
+             # Softmax over the two prompts in the pair, so the positive and
+             # negative probabilities sum to 1.
+             probs = logits_per_image.softmax(dim=-1).cpu().numpy()
+         return probs[0]
+
+     probs = [evaluate(pair) for pair in pairs]
+
+     positive_probs = [prob[0] for prob in probs]
+     negative_probs = [prob[1] for prob in probs]
+
+     # Map each pair's (positive - negative) difference from [-1, 1] onto [0, 100].
+     hotness_score = round((probs[0][0] - probs[0][1] + 1) * 50, 2)
+     beauty_score = round((probs[1][0] - probs[1][1] + 1) * 50, 2)
+     attractiveness_score = round((probs[2][0] - probs[2][1] + 1) * 50, 2)
+
+     # The composite applies the same mapping to the probabilities averaged
+     # across all three pairs.
+     hot_score = sum(positive_probs) / len(positive_probs)
+     ugly_score = sum(negative_probs) / len(negative_probs)
+     composite = round((hot_score - ugly_score + 1) * 50, 2)
+     return composite, hotness_score, beauty_score, attractiveness_score
+
+ iface = gr.Interface(
+     fn=hotornot,
+     inputs=[
+         gr.inputs.Image(label="Image"),
+         gr.inputs.Dropdown(
+             ['person', 'man', 'woman'],
+             default='person',
+         ),
+     ],
+     outputs=[
+         gr.Textbox(label="Total Hot or Not™ Score"),
+         gr.Textbox(label="Hotness Score"),
+         gr.Textbox(label="Beauty Score"),
+         gr.Textbox(label="Attractiveness Score"),
+     ],
+     title="Hot or Not",
+     description="A simple hot or not app using OpenAI's CLIP model. How it works: the input image is embedded with OpenAI's CLIP image-text model and scored against prompts describing hotness, beauty, and attractiveness. These values are then combined into a composite score on a scale of 0 to 100.",
+ )
+ iface.launch()
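Since the softmax in evaluate is taken over just the two prompts of a pair, p_pos + p_neg = 1 and the expression (p_pos - p_neg + 1) * 50 reduces to 100 * p_pos. A minimal sketch of that arithmetic (pair_score is a hypothetical helper and the probability values are invented for illustration, not real CLIP outputs):

def pair_score(p_pos: float, p_neg: float) -> float:
    # (p_pos - p_neg) lies in [-1, 1]; shifting by 1 and scaling by 50
    # maps it onto [0, 100]. With a two-way softmax (p_pos + p_neg == 1),
    # this is simply 100 * p_pos.
    return round((p_pos - p_neg + 1) * 50, 2)

print(pair_score(0.5, 0.5))  # 50.0: the pair is undecided
print(pair_score(0.9, 0.1))  # 90.0: strongly positive
print(pair_score(0.1, 0.9))  # 10.0: strongly negative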
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ torch
+ git+https://github.com/openai/CLIP.git
+ Pillow
+ gradio
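With these four dependencies installed (pip install -r requirements.txt), the CLIP half of the app can be smoke-tested without launching the Gradio UI. A minimal sketch, assuming a test image exists locally; "photo.jpg" is a placeholder path, not a file in this repo:

# Standalone check of the prompt-pair evaluation used in app.py.
import torch
import clip
from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)

image = preprocess(Image.open("photo.jpg")).unsqueeze(0).to(device)  # placeholder path
text = clip.tokenize(["a hot person", "a gross person"]).to(device)

with torch.no_grad():
    logits_per_image, _ = model(image, text)
    p_pos, p_neg = logits_per_image.softmax(dim=-1).cpu().numpy()[0]

print(round((p_pos - p_neg + 1) * 50, 2))  # 0-100 score for this single pair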