# about / app.py
# (Hub file-page header preserved as comments so the module stays runnable:)
# meg's picture
# meg HF staff
# Quick change so this can be verbally shared at TED.
# 3b66e17 verified
from dataclasses import dataclass
from typing import List, Optional

import gradio as gr
from datasets import load_dataset

from paper_tile import paper_tile
# Curated papers dataset; each row carries a "tags" field that is matched
# against a Category.category_id when rendering the per-category paper tabs.
papers = load_dataset("society-ethics/papers", split="train")
@dataclass
class Space:
    """A Hugging Face Space to feature on a category tab.

    Plain data holder; the dataclass decorator supplies the same
    ``Space(title, id)`` constructor as before, plus ``repr``/``eq``.
    """

    # Display title shown on the Space card.
    title: str
    # Hub repo id ("owner/space-name") used to build the hf.space URL.
    # Field deliberately mirrors the Hub's "id" name despite shadowing builtins.
    id: str
@dataclass
class News:
    """A news article to feature on a category tab.

    Plain data holder; the dataclass decorator supplies the same
    ``News(title, link)`` constructor as before, plus ``repr``/``eq``.
    """

    # Headline shown on the news card (may include emoji).
    title: str
    # Absolute URL opened by the card's "Read more" button.
    link: str
class Category:
    """A themed grouping of ethics-related news items and Spaces.

    Attributes:
        category_id: Slug matched against each paper's ``tags`` field.
        title: Display title (may include emoji) shown on the tab.
        description: Markdown/HTML text rendered in the tab body.
        news: ``News`` items to feature; defaults to an empty list.
        spaces: ``Space`` items to feature; defaults to an empty list.
    """

    def __init__(self, category_id, title, description,
                 news: Optional[List["News"]] = None,
                 spaces: Optional[List["Space"]] = None):
        # None sentinels avoid the mutable-default-argument pitfall:
        # each instance gets its own fresh list.
        if news is None:
            news = []
        if spaces is None:
            spaces = []
        self.category_id = category_id
        self.title = title
        self.description = description
        self.news = news
        self.spaces = spaces
inclusive = Category(
category_id="inclusive",
title="πŸ§‘β€πŸ€β€πŸ§‘ Inclusive",
description="""
These are projects which broaden the scope of who _builds_ and _benefits_ in the machine learning world.
<br><br>
Examples of this can include:
<br><br>
- Curating diverse datasets that increase the representation of underserved groups
- Training language models on languages that aren't yet available on the Hugging Face Hub.
- Creating no-code and low-code frameworks that allow non-technical folk to engage with AI.
""",
news=[
News(
title="πŸš€ Gradio 3.19 - Bugfixes and improved UI/UX for embedded apps",
link="https://twitter.com/Gradio/status/1627702506250805248"
),
News(
title="🧨 Diffusers 0.13 - New pipelines for editing and guiding models",
link="https://twitter.com/multimodalart/status/1627727910801928192"
)
],
spaces=[
Space(
title="Promptist Demo",
id="microsoft/Promptist"
),
Space(
title="MMTAfrica: Multilingual Machine Translation",
id="edaiofficial/mmtafrica"
),
Space(
title="Spanish to Quechua translation",
id="hackathon-pln-es/spanish-to-quechua-translation"
),
]
)
rigorous = Category(
category_id="rigorous",
title="✍️ Rigorous",
description="""
Among the many concerns that go into creating new models is a seemingly simple question: "Does it work?"
<br><br>
Rigorous projects pay special attention to examining failure cases, protecting privacy through security measures, and ensuring that potential users (technical and non-technical) are informed of the project's limitations.
<br><br>
Examples:
<br><br>
- Projects built with models that are well-documented with Model Cards.
- Tools that provide transparency into how a model was trained and how it behaves.
- Evaluations against cutting-edge benchmarks, with results reported against disaggregated sets.
- Demonstrations of models failing across gender, skin type, ethnicity, age or other attributes.
- Techniques for mitigating issues like over-fitting and training data memorization.
- Techniques for detoxifying language models.
""",
news=[
News(
title="Ethics and Society Newsletter #4: Bias in Text-to-Image Models",
link="https://huggingface.co/blog/ethics-soc-4"
),
News(
title="Hugging Face's Open LLM Leaderboard",
link="https://twitter.com/edwardbeeching/status/1656679489433370634"
),
News(
title="Ethics and Society Newsletter #3: Ethical Openness at Hugging Face",
link="https://huggingface.co/blog/ethics-soc-3"
),
News(
title="MIT: These new tools let you see for yourself how biased AI image models are",
link="https://www.technologyreview.com/2023/03/22/1070167/these-news-tool-let-you-see-for-yourself-how-biased-ai-image-models-are/"
),
News(
title="WIRED: Inside the Suspicion Machine",
link="https://www.wired.com/story/welfare-state-algorithms/"
),
News(
title="πŸ—žοΈ AI chatbots are coming to search engines β€” can you trust the results?",
link="https://www.nature.com/articles/d41586-023-00423-4"
),
News(
title="πŸ“‘ Model Cards: Introducing new documentation tools",
link="https://huggingface.co/blog/model-cards"
),
News(
title="πŸ€— Ethics & Society Newsletter #2: Let's talk about bias!",
link="https://huggingface.co/blog/ethics-soc-2"
)
],
spaces=[
Space(
title="Open LLM Leaderboard",
id="HuggingFaceH4/open_llm_leaderboard"
),
Space(
title="A Watermark for Large Language Models",
id="tomg-group-umd/lm-watermarking"
),
Space(
title="Roots Search Tool",
id="bigscience-data/roots-search"
),
Space(
title="Diffusion Bias Explorer",
id="society-ethics/DiffusionBiasExplorer"
),
Space(
title="Disaggregators",
id="society-ethics/disaggregators"
),
Space(
title="Detoxified Language Models",
id="ybelkada/detoxified-lms"
)
]
)
socially_conscious = Category(
category_id="socially conscious",
title="πŸ‘οΈβ€πŸ—¨οΈ Socially Conscious",
description="""
Socially Conscious work shows us how machine learning can support efforts toward a stronger society!
<br><br>
Examples:
<br><br>
- Using machine learning as part of an effort to tackle climate change.
- Building tools to assist with medical research and practice.
- Models for text-to-speech, image captioning, and other tasks aimed at increasing accessibility.
- Creating systems for the digital humanities, such as for Indigenous language revitalization.
""",
news=[
News(
title="πŸ¦“ New dataset: LILA Camera Traps",
link="https://huggingface.co/datasets/society-ethics/lila_camera_traps"
),
News(
title="πŸ§‘β€πŸ”¬ Deep Learning With Proteins",
link="https://huggingface.co/blog/deep-learning-with-proteins"
)
],
spaces=[
Space(
title="Rescue Map: Morocco Earthquake",
id="nt3awnou/Nt3awnou-rescue-map"
),
Space(
title="Climate Q&A 🌍",
id="Ekimetrics/climate-question-answering"
),
Space(
title="Comparing Captioning Models",
id="nielsr/comparing-captioning-models"
),
Space(
title="Whisper Speaker Diarization",
id="vumichien/whisper-speaker-diarization"
),
Space(
title="Speech Recognition from visual lip movement",
id="vumichien/lip_movement_reading"
),
Space(
title="Socratic Models Image Captioning",
id="Geonmo/socratic-models-image-captioning-with-BLOOM"
),
]
)
consentful = Category(
category_id="consentful",
title="🀝 Consentful",
description="""
[What is consentful tech?](https://www.consentfultech.io)
Consentful technology supports the self-determination of people who use and are affected by these technologies.
<br><br>
Examples of this can include:
<br><br>
- Demonstrating a commitment to acquiring data from willing, informed, and appropriately compensated sources.
- Designing systems that respect end-user autonomy, e.g. with privacy-preserving techniques.
- Avoiding extractive, chauvinist, ["dark"](https://www.deceptive.design), and otherwise "unethical" patterns of engagement.
""",
news=[
News(
title="Introducing: πŸ’«StarCoder, a 15B LLM for code with 8k trained only on permissive data",
link="https://twitter.com/BigCodeProject/status/1654174941976068119"
),
News(
title="Spawning lays out plans for letting creators opt out of generative AI training",
link="https://techcrunch.com/2023/05/03/spawning-lays-out-its-plans-for-letting-creators-opt-out-of-generative-ai-training/"
),
News(
title="Creating Privacy Preserving AI with Substra",
link="https://huggingface.co/blog/owkin-substra"
),
News(
title="πŸŽ₯ MoroccoAI webinar - Loubna Ben Allal - 'Building open large models for code'",
link="https://twitter.com/MoroccoAI/status/1629105224848646144"
),
News(
title="The Stack - 3 TB of Permissively Licensed Source Code",
link="https://www.bigcode-project.org/docs/about/the-stack/"
)
],
spaces=[
Space(
title="Federated Learning with Substra",
id="owkin/substra"
),
Space(
title="Does CLIP Know My Face?",
id="AIML-TUDA/does-clip-know-my-face"
),
Space(
title="Sentiment Analysis on Encrypted Data with FHE",
id="zama-fhe/encrypted_sentiment_analysis"
),
Space(
title="SantaCoder: Code Generation",
id="bigcode/santacoder-demo"
),
Space(
title="Data Anonymization in Autonomous Driving",
id="khaclinh/self-driving-anonymization"
),
Space(
title="Raising the Cost of Malicious AI-Powered Image Editing",
id="RamAnanth1/photoguard"
),
]
)
sustainable = Category(
category_id="sustainable",
title="🌎 Sustainable",
description="""
This is work that highlights and explores techniques for making machine learning ecologically sustainable.
<br><br>
Examples
<br><br>
- Tracking emissions from training and running inferences on large language models.
- Quantization and distillation methods to reduce carbon footprints without sacrificing model quality.
""",
news=[
News(
title="πŸ”” New paper: Counting Carbon – Luccioni & Hernandez-Garcia, 2023",
link="https://twitter.com/SashaMTL/status/1626572394130292737"
),
News(
title="PEFT: Parameter-Efficient Fine-Tuning on Low-Resource Hardware",
link="https://huggingface.co/blog/peft"
)
],
spaces=[
Space(
title="Hugging Face Carbon Compare Tool",
id="huggingface/Carbon-Compare"
),
Space(
title="Image Classification with EfficientFormer-L1",
id="adirik/efficientformer"
),
Space(
title="EfficientNetV2 Deepfakes Video Detector",
id="Ron0420/EfficientNetV2_Deepfakes_Video_Detector"
),
]
)
inquisitive = Category(
category_id="inquisitive",
title="πŸ€” Inquisitive",
description="""
Some projects take a radical new approach to concepts which may have become commonplace. These projects, often rooted in critical theory, shine a light on inequities and power structures which challenge the community to rethink its relationship to technology.
<br><br>
Examples:
<br><br>
- Reframing AI and machine learning from Indigenous perspectives.
- Highlighting LGBTQIA2S+ marginalization in AI.
- Critiquing the harms perpetuated by AI systems.
- Discussing the role of "openness" in AI research.
""",
news=[
News(
title="Announcing the NeurIPS Code of Ethics",
link="https://blog.neurips.cc/2023/04/20/announcing-the-neurips-code-of-ethics/"
),
News(
title="πŸŽ™οΈ NPR: Know It All: What Is AI And How Will It Shape The Future?",
link="https://www.npr.org/2023/02/20/1158300530/know-it-all-what-is-ai-and-how-will-it-shape-the-future"
),
News(
title="🦜 DAIR's Stochastic Parrots Day is on March 17",
link="https://twitter.com/emilymbender/status/1627312284392640513"
),
News(
title="🌈 New paper: The Gradient of Generative AI Release – Solaiman, 2023",
link="https://twitter.com/IreneSolaiman/status/1625158317378252800"
),
News(
title="βš–οΈ Diffusers has a brand new Ethical Guidelines doc!",
link="https://github.com/huggingface/diffusers/pull/2330"
)
],
spaces=[
Space(
title="Spanish Gender Neutralizer",
id="hackathon-pln-es/es_nlp_gender_neutralizer"
),
Space(
title="PAIR: Datasets Have Worldviews",
id="merve/dataset-worldviews"
),
]
)
# Tab display order on the page (deliberate, not alphabetical).
categories = [rigorous, consentful, socially_conscious, sustainable, inclusive, inquisitive]
def news_card(news):
    """Render one news entry: its title next to a button that opens the article."""
    with gr.Box(), gr.Row(elem_id="news-row"):
        gr.Markdown(f"{news.title}")
        # Open the article in a new browser tab via client-side JS only.
        open_js = f"() => window.open('{news.link}')"
        read_more = gr.Button(elem_id="article-button", value="Read more 🔗")
        read_more.click(fn=None, _js=open_js)
def space_card(space):
    """Render one Space entry: its title next to a button that opens the Space."""
    with gr.Box(elem_id="space-card"), gr.Row(elem_id="news-row"):
        gr.Markdown(f"{space.title}")
        # Open the Space in a new browser tab via client-side JS only.
        view_js = f"() => window.open('https://hf.space/{space.id}')"
        view = gr.Button(elem_id="article-button", value="View 🔭")
        view.click(fn=None, _js=view_js)
def category_tab(category):
    """Render one category as a tab: description, news cards, Spaces, and papers.

    Papers come from the module-level ``papers`` dataset, filtered to rows
    whose ``tags`` contain this category's ``category_id``.
    """
    with gr.Tab(label=category.title, elem_id="news-tab"):
        with gr.Row():
            with gr.Column():
                gr.Markdown(category.description, elem_id="margin-top")
            with gr.Column():
                gr.Markdown("### Hugging Face News 📰")
                # Plain loops, not comprehensions: these calls are pure side effects.
                for item in category.news:
                    news_card(item)
        # with gr.Tab(label="Hugging Face Projects"):
        #     gr.Markdown("....")
        with gr.Tab(label="Spaces"):
            with gr.Row(elem_id="spaces-flex"):
                for space in category.spaces:
                    space_card(space)
        with gr.Tab(label="🤗 Hugging Face Papers"):
            with gr.Row(elem_id="spaces-flex"):
                for paper in papers.filter(lambda p: category.category_id in p["tags"]):
                    paper_tile(paper)
        # with gr.Tab(label="Models - Coming Soon!"):
        #     gr.Markdown(elem_id="margin-top", value="#### Check back soon for featured models 🤗")
        # with gr.Tab(label="Datasets - Coming Soon!"):
        #     gr.Markdown(elem_id="margin-top", value="#### Check back soon for featured datasets 🤗")
# Page layout: header, events accordion, Discord accordion, intro text, then
# one tab per category. The CSS string styles the cards and flex layouts.
with gr.Blocks(css="#margin-top {margin-top: 15px} #center {text-align: center;} #news-tab {padding: 15px;} #news-tab h3 {margin: 0px; text-align: center;} #news-tab p {margin: 0px;} #article-button {flex-grow: initial;} #news-row {align-items: center;} #spaces-flex {flex-wrap: wrap; justify-content: space-around;} #space-card { display: flex; min-width: calc(90% / 3); max-width:calc(100% / 3); box-sizing: border-box;} #event-tabs {margin-top: 0px;} #spaces-flex > #paper-tile {min-width: 30%; max-width: 30%;}") as demo:
    with gr.Row(elem_id="center"):
        gr.Markdown("# Ethics & Society at Hugging Face")
    gr.Markdown("""
    At Hugging Face, we are committed to operationalizing ethics at the cutting-edge of machine learning. This page is dedicated to highlighting projects – inside and outside Hugging Face – in order to encourage and support more ethical development and use of AI. We wish to foster ongoing conversations of ethics and values; this means that this page will evolve over time, and your feedback is invaluable. Please open up an issue in the [Community tab](https://huggingface.co/spaces/society-ethics/about/discussions) to share your thoughts!
    """)
    with gr.Accordion(label="Events", open=False):
        with gr.Tab(label="Upcoming Events"):
            with gr.Row(elem_id="margin-top"):
                gr.Markdown("We'll be announcing more events soon!")
        with gr.Tab(label="Past Events"):
            with gr.Row(elem_id="margin-top"):
                with gr.Column(scale=1):
                    gr.Image(value="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/making-intelligence-banner.png", show_label=False)
                with gr.Column(scale=2):
                    with gr.Tabs(elem_id="event-tabs"):
                        with gr.Tab("About the Event"):
                            gr.Markdown("""
                            For our inaugural Ethics & Society Q&A, we're welcoming [Borhane Blili-Hamelin, PhD](https://borhane.xyz), and [Leif Hancox-Li, PhD](https://boltzmann-brain.github.io)!
                            Come discuss their recent paper (["Making Intelligence: Ethical Values in IQ and ML Benchmarks"](https://arxiv.org/abs/2209.00692)), learn about the value-laden aspects of ML benchmarks, and share your ideas on how we can apply these lessons to our work 🤗
                            Join the Discord and RSVP to the event: [Click me!](https://discord.com/events/879548962464493619/1082360845113229332) 🚀
                            **Date:** March 13th 2023, 9:00 AM Pacific Time, **Location:** The Hugging Face Discord at #ethics-and-society
                            """)
                        with gr.Tab("Speaker Bios"):
                            gr.Markdown("""
                            ### About Borhane Blili-Hamelin, PhD (he/him)
                            I’m a consultant, researcher, and organizer focused on AI ethics. As a consultant with BABL AI, I help organizations mitigate harm through AI risk management and auditing. I build cross-disciplinary research projects on the risks and values embedded in AI systems. I also love participatory problem-solving and community-driven projects. I'm Ethics and Performance Lead at AVID, founded Accountability Case Labs, and am co-director of Open Post Academics. I'm a Mozilla Festival alum: former TAIWG Project Lead and Wrangler. I strive to make AI governance more cross-disciplinary, reflective and empowering for impacted communities.
                            I have a PhD in philosophy from Columbia University.
                            I’m a Québec expat living in Brooklyn, NY!
                            #### Links
                            - Personal website: [borhane.xyz](https://borhane.xyz)
                            - Linkedin: [linkedin.com/in/borhane](https://www.linkedin.com/in/borhane/)
                            - Twitter: [@Borhane_B_H](https://twitter.com/Borhane_B_H)
                            ### About Leif Hancox-Li, PhD (he/they)
                            I’m a data scientist who does interdisciplinary [research](https://boltzmann-brain.github.io/papers) on responsible AI and helps data science teams make their models more explainable. I excel at bringing a humanistic perspective to technical issues while also having the skills to implement technical solutions that take social values into account. My research has won best paper awards at both [FAccT](https://twitter.com/FAccTConference/status/1369315183143903237?s=20) and the [Philosophy of Science Association](https://philsci.org/ernest_nagel_early-career_scho.php).
                            Prior to this, I was a technical writer for a variety of software products, ranging from MLOps to REST APIs to complicated enterprise GUIs. An even longer time ago, I got a PhD in philosophy after stints in computer vision and physics.
                            #### Links
                            - Personal website: [boltzmann-brain.github.io](https://boltzmann-brain.github.io)
                            - Linkedin: [https://www.linkedin.com/in/leif-hancox-li-1a6a7a132/](https://www.linkedin.com/in/leif-hancox-li-1a6a7a132/)
                            - Twitter: [@struthious](https://twitter.com/struthious)
                            """)
                        with gr.Tab("Paper Abstract"):
                            gr.Markdown("""
                            Read the full paper at: [https://arxiv.org/abs/2209.00692](https://arxiv.org/abs/2209.00692)
                            > In recent years, ML researchers have wrestled with defining and improving machine learning (ML) benchmarks and datasets. In parallel, some have trained a critical lens on the ethics of dataset creation and ML research. In this position paper, we highlight the entanglement of ethics with seemingly ``technical'' or ``scientific'' decisions about the design of ML benchmarks. Our starting point is the existence of multiple overlooked structural similarities between human intelligence benchmarks and ML benchmarks. Both types of benchmarks set standards for describing, evaluating, and comparing performance on tasks relevant to intelligence -- standards that many scholars of human intelligence have long recognized as value-laden. We use perspectives from feminist philosophy of science on IQ benchmarks and thick concepts in social science to argue that values need to be considered and documented when creating ML benchmarks. It is neither possible nor desirable to avoid this choice by creating value-neutral benchmarks. Finally, we outline practical recommendations for ML benchmark research ethics and ethics review.
                            """)
    with gr.Accordion(label="Visit us over on the Hugging Face Discord!", open=False):
        gr.Markdown("""
        Follow these steps to join the discussion:
        1. Go to [hf.co/join/discord](https://hf.co/join/discord) to join the Discord server.
        2. Once you've registered, go to the `#role-assignment` channel.
        3. Select the "Open Science" role.
        4. Head over to `#ethics-and-society` to join the conversation 🥳
        """, elem_id="margin-top")
    gr.Markdown("""
    ### NEW
    Check out our collection on [Provenance, Watermarking, and Deepfake Detection](https://huggingface.co/collections/society-ethics/provenance-watermarking-and-deepfake-detection-65c6792b0831983147bb7578) -- especially important to know about with potential malicious use of generative AI in coming elections.
    ### What does ethical AI look like?
    We analyzed the submissions on Hugging Face Spaces and put together a set of 6 high-level categories for describing ethical aspects of machine learning work. Visit each tab to learn more about each category and to see what Hugging Face and its community have been up to! Is there a Space that you'd like to see featured? [Submit it here 🚀](https://huggingface.co/spaces/society-ethics/featured-spaces-submissions)
    """)
    with gr.Column():
        # Plain loop, not a comprehension: category_tab is called for side effects.
        for cat in categories:
            category_tab(cat)
demo.launch()