Yeonchan Ahn commited on
Commit
09b4d4d
1 Parent(s): 66349cb
Files changed (4) hide show
  1. README.md +44 -9
  2. alignment_and_uniformity.py +90 -0
  3. app.py +6 -0
  4. tests.py +8 -0
README.md CHANGED
@@ -1,15 +1,50 @@
1
  ---
2
- title: Alignment And Uniformity
3
- emoji: 😻
4
- colorFrom: green
5
- colorTo: red
 
 
 
6
  sdk: gradio
7
- sdk_version: 5.3.0
8
  app_file: app.py
9
  pinned: false
10
- license: apache-2.0
11
- short_description: Metrics for Alignment and Uniformity on the Hypersphere.
12
  ---
13
- test
14
 
15
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ title: Alignment and Uniformity
3
+ datasets:
4
+ -
5
+ tags:
6
+ - evaluate
7
+ - metric
8
+ description: "Metrics for Alignment and Uniformity on the Hypersphere."
9
  sdk: gradio
10
+ sdk_version: 3.19.1
11
  app_file: app.py
12
  pinned: false
 
 
13
  ---
 
14
 
15
+ # Metric Card for Alignment and Uniformity
16
+
17
+ ***Module Card Instructions:*** *Fill out the following subsections. Feel free to take a look at existing metric cards if you'd like examples.*
18
+
19
+ ## Metric Description
20
+ *Give a brief overview of this metric, including what task(s) it is usually used for, if any.*
21
+
22
+ ## How to Use
23
+ *Give general statement of how to use the metric*
24
+
25
+ *Provide simplest possible example for using the metric*
26
+
27
+ ### Inputs
28
+ *List all input arguments in the format below*
29
+ - **input_field** *(type): Definition of input, with explanation if necessary. State any default value(s).*
30
+
31
+ ### Output Values
32
+
33
+ *Explain what this metric outputs and provide an example of what the metric output looks like. Modules should return a dictionary with one or multiple key-value pairs, e.g. {"bleu" : 6.02}*
34
+
35
+ *State the range of possible values that the metric's output can take, as well as what in that range is considered good. For example: "This metric can take on any value between 0 and 100, inclusive. Higher scores are better."*
36
+
37
+ #### Values from Popular Papers
38
+ *Give examples, preferably with links to leaderboards or publications, to papers that have reported this metric, along with the values they have reported.*
39
+
40
+ ### Examples
41
+ *Give code examples of the metric being used. Try to include examples that clear up any potential ambiguity left from the metric description above. If possible, provide a range of examples that show both typical and atypical results, as well as examples where a variety of input parameters are passed.*
42
+
43
+ ## Limitations and Bias
44
+ *Note any known limitations or biases that the metric has, with links and references if possible.*
45
+
46
+ ## Citation
47
+ *Cite the source where this metric was introduced.*
48
+
49
+ ## Further References
50
+ *Add any useful further references.*
alignment_and_uniformity.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datasets
2
+ import evaluate
3
+ from typing import List
4
+ import torch
5
+
6
+
7
+ _DESCRIPTION = """
8
+ Quantifying encoder feature distribution properties, Alignment and Uniformity on the Hypersphere.
9
+ (https://github.com/ssnl/align_uniform)
10
+ """
11
+
12
+ _KWARGS_DESCRIPTION = """
13
+ Args:
14
+ xs (`list` of a list of `int`): a group of embeddings
15
+ ys (`list` of `int`): the other group of embeddings paired with the ys
16
+
17
+ Returns:
18
+ "align_loss": float(align_loss_val),
19
+ "x_unif_loss": float(x_unif_loss_v),
20
+ "y_unif_loss": float(y_unif_loss_v),
21
+ "unif_loss": float(unif_loss)
22
+
23
+ Examples:
24
+
25
+ Example 1-A simple example
26
+ >>> metrics = evaluate.load("ahnyeonchan/Alignment-and-Uniformity")
27
+ >>> results = metrics.compute(xs=[[1.0, 1.0], [0.0, 1.0]], ys=[[1.0, 1.0], [0.0, 1.0]])
28
+ >>> print(results)
29
+ {'align_loss': 0.0, 'x_unif_loss': -2.0, 'y_unif_loss': -2.0, 'unif_loss': -2.0}
30
+ """
31
+
32
+ _CITATION = """"""
33
+
34
+
35
+ def align_loss(x, y, alpha=2):
36
+ return (x - y).norm(p=2, dim=1).pow(alpha).mean()
37
+
38
+
39
+ def uniform_loss(x, t=2):
40
+ return torch.pdist(x, p=2).pow(2).mul(-t).exp().mean().log()
41
+
42
+
43
+ @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
44
+ class AlignUniform(evaluate.Metric):
45
+ def __init__(self, align_alpha: float = 2.0, unif_t: float = 2.0, *args, **kwargs):
46
+ super(AlignUniform, self).__init__(*args, **kwargs)
47
+ self.align_alpha = align_alpha
48
+ self.unif_t = unif_t
49
+
50
+ def _info(self):
51
+ return evaluate.MetricInfo(
52
+ description=_DESCRIPTION,
53
+ citation=_CITATION,
54
+ inputs_description=_KWARGS_DESCRIPTION,
55
+ features=datasets.Features(
56
+ {
57
+ "xs": datasets.Sequence(datasets.Value("float32")),
58
+ "ys": datasets.Sequence(datasets.Value("float32")),
59
+ }
60
+ ),
61
+ reference_urls=[],
62
+ )
63
+
64
+ def _compute(self, xs: List[List], ys: List[List]):
65
+
66
+ if isinstance(xs, torch.Tensor):
67
+ xs = torch.Tensor(xs)
68
+ elif isinstance(ys, list):
69
+ xs = torch.Tensor(xs)
70
+ else:
71
+ raise NotImplementedError()
72
+
73
+ if isinstance(ys, torch.Tensor):
74
+ ys = torch.Tensor(ys)
75
+ elif isinstance(ys, list):
76
+ ys = torch.Tensor(ys)
77
+ else:
78
+ raise NotImplementedError()
79
+
80
+ align_loss_val = align_loss(xs, ys, self.align_alpha)
81
+ x_unif_loss_v = uniform_loss(xs, t=self.unif_t)
82
+ y_unif_loss_v = uniform_loss(ys, t=self.unif_t)
83
+ unif_loss = (x_unif_loss_v + y_unif_loss_v) / 2
84
+
85
+ return {
86
+ "align_loss": float(align_loss_val),
87
+ "x_unif_loss": float(x_unif_loss_v),
88
+ "y_unif_loss": float(y_unif_loss_v),
89
+ "unif_loss": float(unif_loss)
90
+ }
app.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ import evaluate
2
+ from evaluate.utils import launch_gradio_widget
3
+
4
+
5
+ module = evaluate.load("ahnyeonchan/Alignment-and-Uniformity")
6
+ launch_gradio_widget(module)
tests.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ test_cases = [
2
+ {
3
+ "xs": [[1.0, 1.0], [0.0, 1.0]],
4
+ "ys": [[1.0, 1.0], [0.0, 1.0]],
5
+ "result": {'align_loss': 0.0, 'x_unif_loss': -2.0, 'y_unif_loss': -2.0, 'unif_loss': -2.0}
6
+ }
7
+
8
+ ]